From 861dd6b75956f2c12814ad32b05624d8d8537d52 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 13 Jun 2025 02:15:06 +0000 Subject: [PATCH 01/41] chore(tests): run tests in parallel --- pyproject.toml | 3 ++- requirements-dev.lock | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 9dd31517..03329f8b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,6 +54,7 @@ dev-dependencies = [ "importlib-metadata>=6.7.0", "rich>=13.7.1", "nest_asyncio==1.6.0", + "pytest-xdist>=3.6.1", ] [tool.rye.scripts] @@ -125,7 +126,7 @@ replacement = '[\1](https://github.com/digitalocean/genai-python/tree/main/\g<2> [tool.pytest.ini_options] testpaths = ["tests"] -addopts = "--tb=short" +addopts = "--tb=short -n auto" xfail_strict = true asyncio_mode = "auto" asyncio_default_fixture_loop_scope = "session" diff --git a/requirements-dev.lock b/requirements-dev.lock index f784e9a3..1e074a56 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -30,6 +30,8 @@ distro==1.8.0 exceptiongroup==1.2.2 # via anyio # via pytest +execnet==2.1.1 + # via pytest-xdist filelock==3.12.4 # via virtualenv h11==0.14.0 @@ -72,7 +74,9 @@ pygments==2.18.0 pyright==1.1.399 pytest==8.3.3 # via pytest-asyncio + # via pytest-xdist pytest-asyncio==0.24.0 +pytest-xdist==3.7.0 python-dateutil==2.8.2 # via time-machine pytz==2023.3.post1 From abba5be958d03a7e5ce7d1cbf8069c0bcf52ee20 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 13 Jun 2025 02:39:39 +0000 Subject: [PATCH 02/41] fix(client): correctly parse binary response | stream --- src/digitalocean_genai_sdk/_base_client.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/digitalocean_genai_sdk/_base_client.py b/src/digitalocean_genai_sdk/_base_client.py index 73cd30fc..6fd247cc 100644 --- 
a/src/digitalocean_genai_sdk/_base_client.py +++ b/src/digitalocean_genai_sdk/_base_client.py @@ -1071,7 +1071,14 @@ def _process_response( ) -> ResponseT: origin = get_origin(cast_to) or cast_to - if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): + if ( + inspect.isclass(origin) + and issubclass(origin, BaseAPIResponse) + # we only want to actually return the custom BaseAPIResponse class if we're + # returning the raw response, or if we're not streaming SSE, as if we're streaming + # SSE then `cast_to` doesn't actively reflect the type we need to parse into + and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER))) + ): if not issubclass(origin, APIResponse): raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}") @@ -1574,7 +1581,14 @@ async def _process_response( ) -> ResponseT: origin = get_origin(cast_to) or cast_to - if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): + if ( + inspect.isclass(origin) + and issubclass(origin, BaseAPIResponse) + # we only want to actually return the custom BaseAPIResponse class if we're + # returning the raw response, or if we're not streaming SSE, as if we're streaming + # SSE then `cast_to` doesn't actively reflect the type we need to parse into + and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER))) + ): if not issubclass(origin, AsyncAPIResponse): raise TypeError(f"API Response types must subclass {AsyncAPIResponse}; Received {origin}") From f8536166320d1d5bacf1d10a5edb2f71691dde8b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 13 Jun 2025 16:42:53 +0000 Subject: [PATCH 03/41] feat(api): update via SDK Studio --- .github/workflows/create-releases.yml | 38 + .github/workflows/publish-pypi.yml | 10 +- .github/workflows/release-doctor.yml | 5 +- .stats.yml | 4 +- CONTRIBUTING.md | 6 +- LICENSE | 2 +- README.md | 92 +- SECURITY.md | 2 +- api.md | 263 
+---- bin/check-release-environment | 6 +- mypy.ini | 2 +- pyproject.toml | 18 +- release-please-config.json | 2 +- scripts/lint | 2 +- .../_utils/_resources_proxy.py | 24 - .../resources/__init__.py | 145 --- .../resources/agents/__init__.py | 89 -- .../resources/agents/agents.py | 965 ------------------ .../resources/agents/api_keys.py | 581 ----------- .../resources/agents/child_agents.py | 508 --------- .../resources/agents/functions.py | 421 -------- .../resources/agents/knowledge_bases.py | 346 ------- .../resources/api_keys/__init__.py | 19 - .../resources/api_keys/api_keys.py | 275 ----- .../resources/api_keys/api_keys_.py | 529 ---------- .../resources/auth/agents/__init__.py | 33 - .../resources/auth/agents/token.py | 173 ---- .../resources/auth/auth.py | 102 -- .../resources/indexing_jobs.py | 543 ---------- .../resources/knowledge_bases/__init__.py | 33 - .../resources/knowledge_bases/data_sources.py | 410 -------- .../knowledge_bases/knowledge_bases.py | 667 ------------ .../resources/providers/__init__.py | 47 - .../resources/providers/anthropic/__init__.py | 33 - .../providers/anthropic/anthropic.py | 102 -- .../resources/providers/anthropic/keys.py | 662 ------------ .../resources/providers/openai/__init__.py | 33 - .../resources/providers/openai/keys.py | 658 ------------ .../resources/providers/openai/openai.py | 102 -- .../resources/providers/providers.py | 134 --- .../resources/regions.py | 191 ---- src/digitalocean_genai_sdk/types/__init__.py | 57 -- .../types/agent_create_params.py | 39 - .../types/agent_create_response.py | 16 - .../types/agent_delete_response.py | 16 - .../types/agent_list_params.py | 18 - .../types/agent_list_response.py | 198 ---- .../types/agent_retrieve_response.py | 16 - .../types/agent_update_params.py | 65 -- .../types/agent_update_response.py | 16 - .../types/agent_update_status_params.py | 16 - .../types/agent_update_status_response.py | 16 - .../types/agents/__init__.py | 31 - 
.../types/agents/api_key_create_params.py | 15 - .../types/agents/api_key_create_response.py | 12 - .../types/agents/api_key_delete_response.py | 12 - .../types/agents/api_key_list_params.py | 15 - .../types/agents/api_key_list_response.py | 18 - .../agents/api_key_regenerate_response.py | 12 - .../types/agents/api_key_update_params.py | 19 - .../types/agents/api_key_update_response.py | 12 - .../agents/api_link_knowledge_base_output.py | 16 - .../types/agents/child_agent_add_params.py | 22 - .../types/agents/child_agent_add_response.py | 14 - .../agents/child_agent_delete_response.py | 13 - .../types/agents/child_agent_update_params.py | 24 - .../agents/child_agent_update_response.py | 18 - .../types/agents/child_agent_view_response.py | 16 - .../types/agents/function_create_params.py | 25 - .../types/agents/function_create_response.py | 16 - .../types/agents/function_delete_response.py | 16 - .../types/agents/function_update_params.py | 29 - .../types/agents/function_update_response.py | 16 - .../agents/knowledge_base_detach_response.py | 16 - src/digitalocean_genai_sdk/types/api_agent.py | 263 ----- .../types/api_agent_api_key_info.py | 22 - .../types/api_agreement.py | 17 - .../types/api_anthropic_api_key_info.py | 22 - .../types/api_indexing_job.py | 43 - .../types/api_key_list_params.py | 42 - .../types/api_key_list_response.py | 42 - .../types/api_keys/__init__.py | 13 - .../types/api_keys/api_key_create_params.py | 11 - .../types/api_keys/api_key_create_response.py | 12 - .../types/api_keys/api_key_delete_response.py | 12 - .../types/api_keys/api_key_list_params.py | 15 - .../types/api_keys/api_key_list_response.py | 18 - .../types/api_keys/api_key_update_params.py | 15 - .../api_key_update_regenerate_response.py | 12 - .../types/api_keys/api_key_update_response.py | 12 - .../types/api_keys/api_model_api_key_info.py | 22 - .../types/api_knowledge_base.py | 37 - src/digitalocean_genai_sdk/types/api_model.py | 57 -- .../types/api_model_version.py | 15 - 
.../types/api_openai_api_key_info.py | 25 - .../types/auth/agents/__init__.py | 6 - .../types/auth/agents/token_create_params.py | 13 - .../auth/agents/token_create_response.py | 13 - .../types/indexing_job_create_params.py | 14 - .../types/indexing_job_create_response.py | 12 - .../types/indexing_job_list_params.py | 15 - .../types/indexing_job_list_response.py | 18 - ...xing_job_retrieve_data_sources_response.py | 52 - .../types/indexing_job_retrieve_response.py | 12 - .../indexing_job_update_cancel_params.py | 14 - .../indexing_job_update_cancel_response.py | 12 - .../types/knowledge_base_create_params.py | 64 -- .../types/knowledge_base_create_response.py | 12 - .../types/knowledge_base_delete_response.py | 11 - .../types/knowledge_base_list_params.py | 15 - .../types/knowledge_base_list_response.py | 18 - .../types/knowledge_base_retrieve_response.py | 30 - .../types/knowledge_base_update_params.py | 27 - .../types/knowledge_base_update_response.py | 12 - .../types/knowledge_bases/__init__.py | 16 - .../api_file_upload_data_source.py | 15 - .../api_file_upload_data_source_param.py | 15 - .../api_knowledge_base_data_source.py | 35 - .../knowledge_bases/api_spaces_data_source.py | 15 - .../api_spaces_data_source_param.py | 15 - .../api_web_crawler_data_source.py | 26 - .../api_web_crawler_data_source_param.py | 25 - .../data_source_create_params.py | 33 - .../data_source_create_response.py | 12 - .../data_source_delete_response.py | 13 - .../data_source_list_params.py | 15 - .../data_source_list_response.py | 18 - .../types/providers/anthropic/__init__.py | 14 - .../providers/anthropic/key_create_params.py | 13 - .../anthropic/key_create_response.py | 12 - .../anthropic/key_delete_response.py | 12 - .../anthropic/key_list_agents_params.py | 15 - .../anthropic/key_list_agents_response.py | 22 - .../providers/anthropic/key_list_params.py | 15 - .../providers/anthropic/key_list_response.py | 18 - .../anthropic/key_retrieve_response.py | 12 - 
.../providers/anthropic/key_update_params.py | 17 - .../anthropic/key_update_response.py | 12 - .../types/providers/openai/__init__.py | 14 - .../providers/openai/key_create_params.py | 13 - .../providers/openai/key_create_response.py | 12 - .../providers/openai/key_delete_response.py | 12 - .../types/providers/openai/key_list_params.py | 15 - .../providers/openai/key_list_response.py | 18 - .../openai/key_retrieve_agents_params.py | 15 - .../openai/key_retrieve_agents_response.py | 22 - .../providers/openai/key_retrieve_response.py | 12 - .../providers/openai/key_update_params.py | 17 - .../providers/openai/key_update_response.py | 12 - .../types/region_list_params.py | 15 - .../types/region_list_response.py | 23 - .../__init__.py | 16 +- .../_base_client.py | 2 +- .../_client.py | 301 +----- .../_compat.py | 0 .../_constants.py | 0 .../_exceptions.py | 4 +- .../_files.py | 0 .../_models.py | 0 .../_qs.py | 0 .../_resource.py | 10 +- .../_response.py | 12 +- .../_streaming.py | 6 +- .../_types.py | 2 +- .../_utils/__init__.py | 0 .../_utils/_logs.py | 6 +- .../_utils/_proxy.py | 0 .../_utils/_reflection.py | 0 src/gradientai/_utils/_resources_proxy.py | 24 + .../_utils/_streams.py | 0 .../_utils/_sync.py | 0 .../_utils/_transform.py | 0 .../_utils/_typing.py | 0 .../_utils/_utils.py | 0 .../_version.py | 2 +- src/gradientai/lib/.keep | 4 + .../py.typed | 0 src/gradientai/resources/__init__.py | 61 ++ .../resources/agents}/__init__.py | 28 +- .../resources}/agents/agents.py | 50 +- .../resources/agents/versions.py | 8 +- .../resources/chat.py | 8 +- .../resources/embeddings.py | 8 +- .../resources/models.py | 8 +- src/gradientai/types/__init__.py | 16 + src/gradientai/types/agents/__init__.py | 10 + .../types/agents/api_links.py | 0 .../types/agents/api_meta.py | 0 .../types/agents/version_list_params.py | 0 .../types/agents/version_list_response.py | 0 .../types/agents/version_update_params.py | 0 .../types/agents/version_update_response.py | 0 
.../types/api_deployment_visibility.py | 0 .../types/api_keys}/__init__.py | 0 .../types/api_retrieval_method.py | 0 .../types/auth}/__init__.py | 0 .../gradientai/types/auth/agents}/__init__.py | 2 + ...request_message_content_part_text_param.py | 0 .../types/chat_completion_token_logprob.py | 0 .../types/chat_create_completion_params.py | 0 .../types/chat_create_completion_response.py | 0 .../types/embedding_create_params.py | 0 .../types/embedding_create_response.py | 0 .../types/knowledge_bases}/__init__.py | 2 + .../types/model.py | 0 .../types/model_list_response.py | 0 .../gradientai/types/providers}/__init__.py | 2 + .../types/providers/anthropic}/__init__.py | 2 + .../types/providers/openai/__init__.py | 3 + tests/api_resources/agents/test_api_keys.py | 572 ----------- .../api_resources/agents/test_child_agents.py | 485 --------- tests/api_resources/agents/test_functions.py | 382 ------- .../agents/test_knowledge_bases.py | 314 ------ tests/api_resources/agents/test_versions.py | 44 +- .../api_resources/api_keys/test_api_keys_.py | 446 -------- tests/api_resources/auth/agents/test_token.py | 124 --- .../knowledge_bases/test_data_sources.py | 374 ------- tests/api_resources/providers/__init__.py | 1 - .../providers/anthropic/__init__.py | 1 - .../providers/anthropic/test_keys.py | 555 ---------- .../providers/openai/__init__.py | 1 - .../providers/openai/test_keys.py | 555 ---------- tests/api_resources/test_agents.py | 597 ----------- tests/api_resources/test_api_keys.py | 100 -- tests/api_resources/test_chat.py | 20 +- tests/api_resources/test_embeddings.py | 20 +- tests/api_resources/test_indexing_jobs.py | 446 -------- tests/api_resources/test_knowledge_bases.py | 510 --------- tests/api_resources/test_models.py | 32 +- tests/api_resources/test_regions.py | 96 -- tests/conftest.py | 14 +- tests/test_client.py | 205 ++-- tests/test_deepcopy.py | 2 +- tests/test_extract_files.py | 4 +- tests/test_files.py | 2 +- tests/test_models.py | 6 +- 
tests/test_qs.py | 2 +- tests/test_required_args.py | 2 +- tests/test_response.py | 40 +- tests/test_streaming.py | 44 +- tests/test_transform.py | 8 +- tests/test_utils/test_proxy.py | 2 +- tests/test_utils/test_typing.py | 2 +- tests/utils.py | 8 +- 244 files changed, 596 insertions(+), 16866 deletions(-) create mode 100644 .github/workflows/create-releases.yml delete mode 100644 src/digitalocean_genai_sdk/_utils/_resources_proxy.py delete mode 100644 src/digitalocean_genai_sdk/resources/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/agents/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/agents/agents.py delete mode 100644 src/digitalocean_genai_sdk/resources/agents/api_keys.py delete mode 100644 src/digitalocean_genai_sdk/resources/agents/child_agents.py delete mode 100644 src/digitalocean_genai_sdk/resources/agents/functions.py delete mode 100644 src/digitalocean_genai_sdk/resources/agents/knowledge_bases.py delete mode 100644 src/digitalocean_genai_sdk/resources/api_keys/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/api_keys/api_keys.py delete mode 100644 src/digitalocean_genai_sdk/resources/api_keys/api_keys_.py delete mode 100644 src/digitalocean_genai_sdk/resources/auth/agents/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/auth/agents/token.py delete mode 100644 src/digitalocean_genai_sdk/resources/auth/auth.py delete mode 100644 src/digitalocean_genai_sdk/resources/indexing_jobs.py delete mode 100644 src/digitalocean_genai_sdk/resources/knowledge_bases/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/knowledge_bases/data_sources.py delete mode 100644 src/digitalocean_genai_sdk/resources/knowledge_bases/knowledge_bases.py delete mode 100644 src/digitalocean_genai_sdk/resources/providers/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/providers/anthropic/__init__.py delete mode 100644 
src/digitalocean_genai_sdk/resources/providers/anthropic/anthropic.py delete mode 100644 src/digitalocean_genai_sdk/resources/providers/anthropic/keys.py delete mode 100644 src/digitalocean_genai_sdk/resources/providers/openai/__init__.py delete mode 100644 src/digitalocean_genai_sdk/resources/providers/openai/keys.py delete mode 100644 src/digitalocean_genai_sdk/resources/providers/openai/openai.py delete mode 100644 src/digitalocean_genai_sdk/resources/providers/providers.py delete mode 100644 src/digitalocean_genai_sdk/resources/regions.py delete mode 100644 src/digitalocean_genai_sdk/types/__init__.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_retrieve_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_update_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_update_status_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agent_update_status_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/__init__.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/api_key_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/api_key_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/api_key_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/api_key_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/api_key_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/api_key_regenerate_response.py delete mode 100644 
src/digitalocean_genai_sdk/types/agents/api_key_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/api_key_update_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/api_link_knowledge_base_output.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/child_agent_add_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/child_agent_add_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/child_agent_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/child_agent_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/child_agent_update_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/child_agent_view_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/function_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/function_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/function_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/function_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/function_update_response.py delete mode 100644 src/digitalocean_genai_sdk/types/agents/knowledge_base_detach_response.py delete mode 100644 src/digitalocean_genai_sdk/types/api_agent.py delete mode 100644 src/digitalocean_genai_sdk/types/api_agent_api_key_info.py delete mode 100644 src/digitalocean_genai_sdk/types/api_agreement.py delete mode 100644 src/digitalocean_genai_sdk/types/api_anthropic_api_key_info.py delete mode 100644 src/digitalocean_genai_sdk/types/api_indexing_job.py delete mode 100644 src/digitalocean_genai_sdk/types/api_key_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/api_key_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/__init__.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/api_key_create_params.py delete mode 100644 
src/digitalocean_genai_sdk/types/api_keys/api_key_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/api_key_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/api_key_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/api_key_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/api_key_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/api_key_update_regenerate_response.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/api_key_update_response.py delete mode 100644 src/digitalocean_genai_sdk/types/api_keys/api_model_api_key_info.py delete mode 100644 src/digitalocean_genai_sdk/types/api_knowledge_base.py delete mode 100644 src/digitalocean_genai_sdk/types/api_model.py delete mode 100644 src/digitalocean_genai_sdk/types/api_model_version.py delete mode 100644 src/digitalocean_genai_sdk/types/api_openai_api_key_info.py delete mode 100644 src/digitalocean_genai_sdk/types/auth/agents/__init__.py delete mode 100644 src/digitalocean_genai_sdk/types/auth/agents/token_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/auth/agents/token_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/indexing_job_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/indexing_job_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/indexing_job_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/indexing_job_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/indexing_job_retrieve_data_sources_response.py delete mode 100644 src/digitalocean_genai_sdk/types/indexing_job_retrieve_response.py delete mode 100644 src/digitalocean_genai_sdk/types/indexing_job_update_cancel_params.py delete mode 100644 src/digitalocean_genai_sdk/types/indexing_job_update_cancel_response.py delete mode 100644 
src/digitalocean_genai_sdk/types/knowledge_base_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_base_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_base_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_base_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_base_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_base_retrieve_response.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_base_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_base_update_response.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/__init__.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source_param.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/api_knowledge_base_data_source.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source_param.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source_param.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/data_source_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_response.py delete mode 100644 
src/digitalocean_genai_sdk/types/providers/anthropic/__init__.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_params.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_retrieve_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_update_params.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/anthropic/key_update_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/__init__.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_create_params.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_create_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_delete_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_list_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_params.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_response.py delete mode 100644 src/digitalocean_genai_sdk/types/providers/openai/key_update_params.py delete mode 100644 
src/digitalocean_genai_sdk/types/providers/openai/key_update_response.py delete mode 100644 src/digitalocean_genai_sdk/types/region_list_params.py delete mode 100644 src/digitalocean_genai_sdk/types/region_list_response.py rename src/{digitalocean_genai_sdk => gradientai}/__init__.py (87%) rename src/{digitalocean_genai_sdk => gradientai}/_base_client.py (99%) rename src/{digitalocean_genai_sdk => gradientai}/_client.py (63%) rename src/{digitalocean_genai_sdk => gradientai}/_compat.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_constants.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_exceptions.py (97%) rename src/{digitalocean_genai_sdk => gradientai}/_files.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_models.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_qs.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_resource.py (76%) rename src/{digitalocean_genai_sdk => gradientai}/_response.py (98%) rename src/{digitalocean_genai_sdk => gradientai}/_streaming.py (98%) rename src/{digitalocean_genai_sdk => gradientai}/_types.py (99%) rename src/{digitalocean_genai_sdk => gradientai}/_utils/__init__.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_utils/_logs.py (67%) rename src/{digitalocean_genai_sdk => gradientai}/_utils/_proxy.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_utils/_reflection.py (100%) create mode 100644 src/gradientai/_utils/_resources_proxy.py rename src/{digitalocean_genai_sdk => gradientai}/_utils/_streams.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_utils/_sync.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_utils/_transform.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_utils/_typing.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_utils/_utils.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/_version.py (79%) create mode 100644 src/gradientai/lib/.keep rename src/{digitalocean_genai_sdk 
=> gradientai}/py.typed (100%) create mode 100644 src/gradientai/resources/__init__.py rename src/{digitalocean_genai_sdk/resources/auth => gradientai/resources/agents}/__init__.py (53%) rename src/{digitalocean_genai_sdk/resources/auth => gradientai/resources}/agents/agents.py (64%) rename src/{digitalocean_genai_sdk => gradientai}/resources/agents/versions.py (97%) rename src/{digitalocean_genai_sdk => gradientai}/resources/chat.py (98%) rename src/{digitalocean_genai_sdk => gradientai}/resources/embeddings.py (97%) rename src/{digitalocean_genai_sdk => gradientai}/resources/models.py (97%) create mode 100644 src/gradientai/types/__init__.py create mode 100644 src/gradientai/types/agents/__init__.py rename src/{digitalocean_genai_sdk => gradientai}/types/agents/api_links.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/agents/api_meta.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/agents/version_list_params.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/agents/version_list_response.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/agents/version_update_params.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/agents/version_update_response.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/api_deployment_visibility.py (100%) rename src/{digitalocean_genai_sdk/types/auth => gradientai/types/api_keys}/__init__.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/api_retrieval_method.py (100%) rename src/{digitalocean_genai_sdk/types/providers => gradientai/types/auth}/__init__.py (100%) rename {tests/api_resources/api_keys => src/gradientai/types/auth/agents}/__init__.py (70%) rename src/{digitalocean_genai_sdk => gradientai}/types/chat_completion_request_message_content_part_text_param.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/chat_completion_token_logprob.py (100%) rename src/{digitalocean_genai_sdk => 
gradientai}/types/chat_create_completion_params.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/chat_create_completion_response.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/embedding_create_params.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/embedding_create_response.py (100%) rename {tests/api_resources/auth => src/gradientai/types/knowledge_bases}/__init__.py (70%) rename src/{digitalocean_genai_sdk => gradientai}/types/model.py (100%) rename src/{digitalocean_genai_sdk => gradientai}/types/model_list_response.py (100%) rename {tests/api_resources/auth/agents => src/gradientai/types/providers}/__init__.py (70%) rename {tests/api_resources/knowledge_bases => src/gradientai/types/providers/anthropic}/__init__.py (70%) create mode 100644 src/gradientai/types/providers/openai/__init__.py delete mode 100644 tests/api_resources/agents/test_api_keys.py delete mode 100644 tests/api_resources/agents/test_child_agents.py delete mode 100644 tests/api_resources/agents/test_functions.py delete mode 100644 tests/api_resources/agents/test_knowledge_bases.py delete mode 100644 tests/api_resources/api_keys/test_api_keys_.py delete mode 100644 tests/api_resources/auth/agents/test_token.py delete mode 100644 tests/api_resources/knowledge_bases/test_data_sources.py delete mode 100644 tests/api_resources/providers/__init__.py delete mode 100644 tests/api_resources/providers/anthropic/__init__.py delete mode 100644 tests/api_resources/providers/anthropic/test_keys.py delete mode 100644 tests/api_resources/providers/openai/__init__.py delete mode 100644 tests/api_resources/providers/openai/test_keys.py delete mode 100644 tests/api_resources/test_agents.py delete mode 100644 tests/api_resources/test_api_keys.py delete mode 100644 tests/api_resources/test_indexing_jobs.py delete mode 100644 tests/api_resources/test_knowledge_bases.py delete mode 100644 tests/api_resources/test_regions.py diff --git 
a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml new file mode 100644 index 00000000..04dac49f --- /dev/null +++ b/.github/workflows/create-releases.yml @@ -0,0 +1,38 @@ +name: Create releases +on: + schedule: + - cron: '0 5 * * *' # every day at 5am UTC + push: + branches: + - main + +jobs: + release: + name: release + if: github.ref == 'refs/heads/main' && github.repository == 'digitalocean/gradientai-python' + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - uses: stainless-api/trigger-release-please@v1 + id: release + with: + repo: ${{ github.event.repository.full_name }} + stainless-api-key: ${{ secrets.STAINLESS_API_KEY }} + + - name: Install Rye + if: ${{ steps.release.outputs.releases_created }} + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' + + - name: Publish to PyPI + if: ${{ steps.release.outputs.releases_created }} + run: | + bash ./bin/publish-pypi + env: + PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index 2bc5b4b2..bff3a970 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -1,13 +1,9 @@ -# This workflow is triggered when a GitHub release is created. -# It can also be run manually to re-publish to PyPI in case it failed for some reason. 
-# You can run this workflow by navigating to https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml +# workflow for re-running publishing to PyPI in case it fails for some reason +# you can run this workflow by navigating to https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml name: Publish PyPI on: workflow_dispatch: - release: - types: [published] - jobs: publish: name: publish @@ -28,4 +24,4 @@ jobs: run: | bash ./bin/publish-pypi env: - PYPI_TOKEN: ${{ secrets.DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN || secrets.PYPI_TOKEN }} + PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index 0f23cbc4..94e02117 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -9,7 +9,7 @@ jobs: release_doctor: name: release doctor runs-on: ubuntu-latest - if: github.repository == 'digitalocean/genai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') + if: github.repository == 'digitalocean/gradientai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') steps: - uses: actions/checkout@v4 @@ -18,4 +18,5 @@ jobs: run: | bash ./bin/check-release-environment env: - PYPI_TOKEN: ${{ secrets.DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN || secrets.PYPI_TOKEN }} + STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }} + PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.stats.yml b/.stats.yml index eb8f1c2d..652e9eac 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 60 +configured_endpoints: 6 openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-17838dec38ee8475c4bf4695b8dc70fe42a8f4da8ae9ffd415dc895b6628a952.yml openapi_spec_hash: cfe5453e150989c8a9dbc9d7b4d1f76a -config_hash: 565bf6264bdf2a317cc5e2f02d02a702 +config_hash: 69dc66269416b2e01e8852b5a6788b97 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7d5d60a7..086907ef 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -36,7 +36,7 @@ $ pip install -r requirements-dev.lock Most of the SDK is generated code. Modifications to code will be persisted between generations, but may result in merge conflicts between manual patches and changes from the generator. The generator will never -modify the contents of the `src/digitalocean_genai_sdk/lib/` and `examples/` directories. +modify the contents of the `src/gradientai/lib/` and `examples/` directories. ## Adding and running examples @@ -62,7 +62,7 @@ If you’d like to use the repository from source, you can either install from g To install via git: ```sh -$ pip install git+ssh://git@github.com/digitalocean/genai-python.git +$ pip install git+ssh://git@github.com/digitalocean/gradientai-python.git ``` Alternatively, you can build from source and install the wheel file: @@ -120,7 +120,7 @@ the changes aren't made through the automated pipeline, you may want to make rel ### Publish with a GitHub workflow -You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. +You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. 
### Publish manually diff --git a/LICENSE b/LICENSE index 0c1fe1d5..974cb08a 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2025 Digitalocean Genai SDK + Copyright 2025 Gradient AI Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index 7b7f4731..a6757d3a 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ -# Digitalocean Genai SDK Python API library +# Gradient AI Python API library [![PyPI version](https://img.shields.io/pypi/v/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python.svg)](https://pypi.org/project/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python/) -The Digitalocean Genai SDK Python library provides convenient access to the Digitalocean Genai SDK REST API from any Python 3.8+ +The Gradient AI Python library provides convenient access to the Gradient AI REST API from any Python 3.8+ application. The library includes type definitions for all request params and response fields, and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx). @@ -25,9 +25,9 @@ The full API of this library can be found in [api.md](api.md). ```python import os -from digitalocean_genai_sdk import DigitaloceanGenaiSDK +from gradientai import GradientAI -client = DigitaloceanGenaiSDK( +client = GradientAI( api_key=os.environ.get( "DIGITALOCEAN_GENAI_SDK_API_KEY" ), # This is the default and can be omitted @@ -46,14 +46,14 @@ so that your API Key is not stored in source control. 
## Async usage -Simply import `AsyncDigitaloceanGenaiSDK` instead of `DigitaloceanGenaiSDK` and use `await` with each API call: +Simply import `AsyncGradientAI` instead of `GradientAI` and use `await` with each API call: ```python import os import asyncio -from digitalocean_genai_sdk import AsyncDigitaloceanGenaiSDK +from gradientai import AsyncGradientAI -client = AsyncDigitaloceanGenaiSDK( +client = AsyncGradientAI( api_key=os.environ.get( "DIGITALOCEAN_GENAI_SDK_API_KEY" ), # This is the default and can be omitted @@ -86,42 +86,48 @@ Typed requests and responses provide autocomplete and documentation within your Nested parameters are dictionaries, typed using `TypedDict`, for example: ```python -from digitalocean_genai_sdk import DigitaloceanGenaiSDK - -client = DigitaloceanGenaiSDK() - -data_source = client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid="knowledge_base_uuid", - aws_data_source={}, +from gradientai import GradientAI + +client = GradientAI() + +response = client.chat.create_completion( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + stream_options={}, ) -print(data_source.aws_data_source) +print(response.stream_options) ``` ## Handling errors -When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `digitalocean_genai_sdk.APIConnectionError` is raised. +When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `gradientai.APIConnectionError` is raised. When the API returns a non-success status code (that is, 4xx or 5xx -response), a subclass of `digitalocean_genai_sdk.APIStatusError` is raised, containing `status_code` and `response` properties. +response), a subclass of `gradientai.APIStatusError` is raised, containing `status_code` and `response` properties. -All errors inherit from `digitalocean_genai_sdk.APIError`. 
+All errors inherit from `gradientai.APIError`. ```python -import digitalocean_genai_sdk -from digitalocean_genai_sdk import DigitaloceanGenaiSDK +import gradientai +from gradientai import GradientAI -client = DigitaloceanGenaiSDK() +client = GradientAI() try: client.agents.versions.list( uuid="REPLACE_ME", ) -except digitalocean_genai_sdk.APIConnectionError as e: +except gradientai.APIConnectionError as e: print("The server could not be reached") print(e.__cause__) # an underlying Exception, likely raised within httpx. -except digitalocean_genai_sdk.RateLimitError as e: +except gradientai.RateLimitError as e: print("A 429 status code was received; we should back off a bit.") -except digitalocean_genai_sdk.APIStatusError as e: +except gradientai.APIStatusError as e: print("Another non-200-range status code was received") print(e.status_code) print(e.response) @@ -149,10 +155,10 @@ Connection errors (for example, due to a network connectivity problem), 408 Requ You can use the `max_retries` option to configure or disable retry settings: ```python -from digitalocean_genai_sdk import DigitaloceanGenaiSDK +from gradientai import GradientAI # Configure the default for all requests: -client = DigitaloceanGenaiSDK( +client = GradientAI( # default is 2 max_retries=0, ) @@ -169,16 +175,16 @@ By default requests time out after 1 minute. You can configure this with a `time which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/#fine-tuning-the-configuration) object: ```python -from digitalocean_genai_sdk import DigitaloceanGenaiSDK +from gradientai import GradientAI # Configure the default for all requests: -client = DigitaloceanGenaiSDK( +client = GradientAI( # 20 seconds (default is 1 minute) timeout=20.0, ) # More granular control: -client = DigitaloceanGenaiSDK( +client = GradientAI( timeout=httpx.Timeout(60.0, read=5.0, write=10.0, connect=2.0), ) @@ -198,10 +204,10 @@ Note that requests that time out are [retried twice by default](#retries). 
We use the standard library [`logging`](https://docs.python.org/3/library/logging.html) module. -You can enable logging by setting the environment variable `DIGITALOCEAN_GENAI_SDK_LOG` to `info`. +You can enable logging by setting the environment variable `GRADIENT_AI_LOG` to `info`. ```shell -$ export DIGITALOCEAN_GENAI_SDK_LOG=info +$ export GRADIENT_AI_LOG=info ``` Or to `debug` for more verbose logging. @@ -223,9 +229,9 @@ if response.my_field is None: The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g., ```py -from digitalocean_genai_sdk import DigitaloceanGenaiSDK +from gradientai import GradientAI -client = DigitaloceanGenaiSDK() +client = GradientAI() response = client.agents.versions.with_raw_response.list( uuid="REPLACE_ME", ) @@ -235,9 +241,9 @@ version = response.parse() # get the object that `agents.versions.list()` would print(version.agent_versions) ``` -These methods return an [`APIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/digitalocean_genai_sdk/_response.py) object. +These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) object. -The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/digitalocean_genai_sdk/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. +The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. 
#### `.with_streaming_response` @@ -301,10 +307,10 @@ You can directly override the [httpx client](https://www.python-httpx.org/api/#c ```python import httpx -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, DefaultHttpxClient +from gradientai import GradientAI, DefaultHttpxClient -client = DigitaloceanGenaiSDK( - # Or use the `DIGITALOCEAN_GENAI_SDK_BASE_URL` env var +client = GradientAI( + # Or use the `GRADIENT_AI_BASE_URL` env var base_url="http://my.test.server.example.com:8083", http_client=DefaultHttpxClient( proxy="http://my.test.proxy.example.com", @@ -324,9 +330,9 @@ client.with_options(http_client=DefaultHttpxClient(...)) By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting. ```py -from digitalocean_genai_sdk import DigitaloceanGenaiSDK +from gradientai import GradientAI -with DigitaloceanGenaiSDK() as client: +with GradientAI() as client: # make requests here ... @@ -343,7 +349,7 @@ This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) con We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. -We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/genai-python/issues) with questions, bugs, or suggestions. +We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/gradientai-python/issues) with questions, bugs, or suggestions. 
### Determining the installed version @@ -352,8 +358,8 @@ If you've upgraded to the latest version but aren't seeing any new features you You can determine the version that is being used at runtime with: ```py -import digitalocean_genai_sdk -print(digitalocean_genai_sdk.__version__) +import gradientai +print(gradientai.__version__) ``` ## Requirements diff --git a/SECURITY.md b/SECURITY.md index d08f7996..a7593759 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -16,7 +16,7 @@ before making any information public. ## Reporting Non-SDK Related Security Issues If you encounter security issues that are not directly related to SDKs but pertain to the services -or products provided by Digitalocean Genai SDK, please follow the respective company's security reporting guidelines. +or products provided by Gradient AI, please follow the respective company's security reporting guidelines. --- diff --git a/api.md b/api.md index 32dbe7df..d05dac3c 100644 --- a/api.md +++ b/api.md @@ -3,7 +3,7 @@ Types: ```python -from digitalocean_genai_sdk.types import ( +from gradientai.types import ( APIAgent, APIAgentAPIKeyInfo, APIAnthropicAPIKeyInfo, @@ -11,310 +11,81 @@ from digitalocean_genai_sdk.types import ( APIModel, APIOpenAIAPIKeyInfo, APIRetrievalMethod, - AgentCreateResponse, - AgentRetrieveResponse, - AgentUpdateResponse, - AgentListResponse, - AgentDeleteResponse, - AgentUpdateStatusResponse, ) ``` -Methods: - -- client.agents.create(\*\*params) -> AgentCreateResponse -- client.agents.retrieve(uuid) -> AgentRetrieveResponse -- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse -- client.agents.list(\*\*params) -> AgentListResponse -- client.agents.delete(uuid) -> AgentDeleteResponse -- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse - -## APIKeys - -Types: - -```python -from digitalocean_genai_sdk.types.agents import ( - APIKeyCreateResponse, - APIKeyUpdateResponse, - APIKeyListResponse, - APIKeyDeleteResponse, - 
APIKeyRegenerateResponse, -) -``` - -Methods: - -- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse -- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse -- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse -- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse -- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse - -## Functions - -Types: - -```python -from digitalocean_genai_sdk.types.agents import ( - FunctionCreateResponse, - FunctionUpdateResponse, - FunctionDeleteResponse, -) -``` - -Methods: - -- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse -- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse -- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse - ## Versions Types: ```python -from digitalocean_genai_sdk.types.agents import ( - APILinks, - APIMeta, - VersionUpdateResponse, - VersionListResponse, -) +from gradientai.types.agents import APILinks, APIMeta, VersionUpdateResponse, VersionListResponse ``` Methods: -- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse -- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse +- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse +- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse ## KnowledgeBases Types: ```python -from digitalocean_genai_sdk.types.agents import ( - APILinkKnowledgeBaseOutput, - KnowledgeBaseDetachResponse, -) -``` - -Methods: - -- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput -- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput -- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, 
agent_uuid) -> KnowledgeBaseDetachResponse - -## ChildAgents - -Types: - -```python -from digitalocean_genai_sdk.types.agents import ( - ChildAgentUpdateResponse, - ChildAgentDeleteResponse, - ChildAgentAddResponse, - ChildAgentViewResponse, -) -``` - -Methods: - -- client.agents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse -- client.agents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse -- client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse -- client.agents.child_agents.view(uuid) -> ChildAgentViewResponse - -# Providers - -## Anthropic - -### Keys - -Types: - -```python -from digitalocean_genai_sdk.types.providers.anthropic import ( - KeyCreateResponse, - KeyRetrieveResponse, - KeyUpdateResponse, - KeyListResponse, - KeyDeleteResponse, - KeyListAgentsResponse, -) -``` - -Methods: - -- client.providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse -- client.providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse -- client.providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse -- client.providers.anthropic.keys.list(\*\*params) -> KeyListResponse -- client.providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse -- client.providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse - -## OpenAI - -### Keys - -Types: - -```python -from digitalocean_genai_sdk.types.providers.openai import ( - KeyCreateResponse, - KeyRetrieveResponse, - KeyUpdateResponse, - KeyListResponse, - KeyDeleteResponse, - KeyRetrieveAgentsResponse, -) -``` - -Methods: - -- client.providers.openai.keys.create(\*\*params) -> KeyCreateResponse -- client.providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse -- client.providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse -- 
client.providers.openai.keys.list(\*\*params) -> KeyListResponse -- client.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse -- client.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse - -# Auth - -## Agents - -### Token - -Types: - -```python -from digitalocean_genai_sdk.types.auth.agents import TokenCreateResponse -``` - -Methods: - -- client.auth.agents.token.create(path_agent_uuid, \*\*params) -> TokenCreateResponse - -# Regions - -Types: - -```python -from digitalocean_genai_sdk.types import RegionListResponse +from gradientai.types.agents import APILinkKnowledgeBaseOutput ``` -Methods: - -- client.regions.list(\*\*params) -> RegionListResponse - # IndexingJobs Types: ```python -from digitalocean_genai_sdk.types import ( - APIIndexingJob, - IndexingJobCreateResponse, - IndexingJobRetrieveResponse, - IndexingJobListResponse, - IndexingJobRetrieveDataSourcesResponse, - IndexingJobUpdateCancelResponse, -) +from gradientai.types import APIIndexingJob ``` -Methods: - -- client.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse -- client.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse -- client.indexing_jobs.list(\*\*params) -> IndexingJobListResponse -- client.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse -- client.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse - # KnowledgeBases Types: ```python -from digitalocean_genai_sdk.types import ( - APIKnowledgeBase, - KnowledgeBaseCreateResponse, - KnowledgeBaseRetrieveResponse, - KnowledgeBaseUpdateResponse, - KnowledgeBaseListResponse, - KnowledgeBaseDeleteResponse, -) +from gradientai.types import APIKnowledgeBase ``` -Methods: - -- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse -- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse -- client.knowledge_bases.update(path_uuid, \*\*params) -> 
KnowledgeBaseUpdateResponse -- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse -- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse - ## DataSources Types: ```python -from digitalocean_genai_sdk.types.knowledge_bases import ( +from gradientai.types.knowledge_bases import ( APIFileUploadDataSource, APIKnowledgeBaseDataSource, APISpacesDataSource, APIWebCrawlerDataSource, - DataSourceCreateResponse, - DataSourceListResponse, - DataSourceDeleteResponse, ) ``` -Methods: - -- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse -- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse -- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse - # APIKeys Types: ```python -from digitalocean_genai_sdk.types import APIAgreement, APIModelVersion, APIKeyListResponse +from gradientai.types import APIAgreement, APIModelVersion ``` -Methods: - -- client.api_keys.list(\*\*params) -> APIKeyListResponse - ## APIKeys Types: ```python -from digitalocean_genai_sdk.types.api_keys import ( - APIModelAPIKeyInfo, - APIKeyCreateResponse, - APIKeyUpdateResponse, - APIKeyListResponse, - APIKeyDeleteResponse, - APIKeyUpdateRegenerateResponse, -) +from gradientai.types.api_keys import APIModelAPIKeyInfo ``` -Methods: - -- client.api*keys.api_keys.create(\*\*params) -> APIKeyCreateResponse -- client.api*keys.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse -- client.api*keys.api_keys.list(\*\*params) -> APIKeyListResponse -- client.api*keys.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse -- client.api*keys.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse - # Chat Types: ```python -from digitalocean_genai_sdk.types import ( +from gradientai.types import ( ChatCompletionRequestMessageContentPartText, ChatCompletionTokenLogprob, 
ChatCreateCompletionResponse, @@ -323,29 +94,29 @@ from digitalocean_genai_sdk.types import ( Methods: -- client.chat.create_completion(\*\*params) -> ChatCreateCompletionResponse +- client.chat.create_completion(\*\*params) -> ChatCreateCompletionResponse # Embeddings Types: ```python -from digitalocean_genai_sdk.types import EmbeddingCreateResponse +from gradientai.types import EmbeddingCreateResponse ``` Methods: -- client.embeddings.create(\*\*params) -> EmbeddingCreateResponse +- client.embeddings.create(\*\*params) -> EmbeddingCreateResponse # Models Types: ```python -from digitalocean_genai_sdk.types import Model, ModelListResponse +from gradientai.types import Model, ModelListResponse ``` Methods: -- client.models.retrieve(model) -> Model -- client.models.list() -> ModelListResponse +- client.models.retrieve(model) -> Model +- client.models.list() -> ModelListResponse diff --git a/bin/check-release-environment b/bin/check-release-environment index 9e89a88a..78967e8b 100644 --- a/bin/check-release-environment +++ b/bin/check-release-environment @@ -2,8 +2,12 @@ errors=() +if [ -z "${STAINLESS_API_KEY}" ]; then + errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.") +fi + if [ -z "${PYPI_TOKEN}" ]; then - errors+=("The DIGITALOCEAN_GENAI_SDK_PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") + errors+=("The GRADIENT_AI_PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") fi lenErrors=${#errors[@]} diff --git a/mypy.ini b/mypy.ini index 54f4282a..748d8234 100644 --- a/mypy.ini +++ b/mypy.ini @@ -8,7 +8,7 @@ show_error_codes = True # # We also exclude our `tests` as mypy doesn't always infer # types correctly and Pyright will still catch any type errors. 
-exclude = ^(src/digitalocean_genai_sdk/_files\.py|_dev/.*\.py|tests/.*)$ +exclude = ^(src/gradientai/_files\.py|_dev/.*\.py|tests/.*)$ strict_equality = True implicit_reexport = True diff --git a/pyproject.toml b/pyproject.toml index 03329f8b..9c6fdd19 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,11 +1,11 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" version = "0.1.0-alpha.3" -description = "The official Python library for the digitalocean-genai-sdk API" +description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" authors = [ -{ name = "Digitalocean Genai SDK", email = "" }, +{ name = "Gradient AI", email = "" }, ] dependencies = [ "httpx>=0.23.0, <1", @@ -34,8 +34,8 @@ classifiers = [ ] [project.urls] -Homepage = "https://github.com/digitalocean/genai-python" -Repository = "https://github.com/digitalocean/genai-python" +Homepage = "https://github.com/digitalocean/gradientai-python" +Repository = "https://github.com/digitalocean/gradientai-python" [tool.rye] @@ -76,14 +76,14 @@ format = { chain = [ "check:ruff" = "ruff check ." "fix:ruff" = "ruff check --fix ." -"check:importable" = "python -c 'import digitalocean_genai_sdk'" +"check:importable" = "python -c 'import gradientai'" typecheck = { chain = [ "typecheck:pyright", "typecheck:mypy" ]} "typecheck:pyright" = "pyright" -"typecheck:verify-types" = "pyright --verifytypes digitalocean_genai_sdk --ignoreexternal" +"typecheck:verify-types" = "pyright --verifytypes gradientai --ignoreexternal" "typecheck:mypy" = "mypy ." 
[build-system] @@ -96,7 +96,7 @@ include = [ ] [tool.hatch.build.targets.wheel] -packages = ["src/digitalocean_genai_sdk"] +packages = ["src/gradientai"] [tool.hatch.build.targets.sdist] # Basically everything except hidden files/directories (such as .github, .devcontainers, .python-version, etc) @@ -122,7 +122,7 @@ path = "README.md" [[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]] # replace relative links with absolute links pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)' -replacement = '[\1](https://github.com/digitalocean/genai-python/tree/main/\g<2>)' +replacement = '[\1](https://github.com/digitalocean/gradientai-python/tree/main/\g<2>)' [tool.pytest.ini_options] testpaths = ["tests"] @@ -199,7 +199,7 @@ length-sort = true length-sort-straight = true combine-as-imports = true extra-standard-library = ["typing_extensions"] -known-first-party = ["digitalocean_genai_sdk", "tests"] +known-first-party = ["gradientai", "tests"] [tool.ruff.lint.per-file-ignores] "bin/**.py" = ["T201", "T203"] diff --git a/release-please-config.json b/release-please-config.json index 234b9475..2ff9a58c 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -61,6 +61,6 @@ ], "release-type": "python", "extra-files": [ - "src/digitalocean_genai_sdk/_version.py" + "src/gradientai/_version.py" ] } \ No newline at end of file diff --git a/scripts/lint b/scripts/lint index 3f725f2d..37b38f6f 100755 --- a/scripts/lint +++ b/scripts/lint @@ -8,4 +8,4 @@ echo "==> Running lints" rye run lint echo "==> Making sure it imports" -rye run python -c 'import digitalocean_genai_sdk' +rye run python -c 'import gradientai' diff --git a/src/digitalocean_genai_sdk/_utils/_resources_proxy.py b/src/digitalocean_genai_sdk/_utils/_resources_proxy.py deleted file mode 100644 index 4ebaf7a4..00000000 --- a/src/digitalocean_genai_sdk/_utils/_resources_proxy.py +++ /dev/null @@ -1,24 +0,0 @@ -from __future__ import annotations - -from typing import Any -from typing_extensions import 
override - -from ._proxy import LazyProxy - - -class ResourcesProxy(LazyProxy[Any]): - """A proxy for the `digitalocean_genai_sdk.resources` module. - - This is used so that we can lazily import `digitalocean_genai_sdk.resources` only when - needed *and* so that users can just import `digitalocean_genai_sdk` and reference `digitalocean_genai_sdk.resources` - """ - - @override - def __load__(self) -> Any: - import importlib - - mod = importlib.import_module("digitalocean_genai_sdk.resources") - return mod - - -resources = ResourcesProxy().__as_proxied__() diff --git a/src/digitalocean_genai_sdk/resources/__init__.py b/src/digitalocean_genai_sdk/resources/__init__.py deleted file mode 100644 index 6dcbff02..00000000 --- a/src/digitalocean_genai_sdk/resources/__init__.py +++ /dev/null @@ -1,145 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .auth import ( - AuthResource, - AsyncAuthResource, - AuthResourceWithRawResponse, - AsyncAuthResourceWithRawResponse, - AuthResourceWithStreamingResponse, - AsyncAuthResourceWithStreamingResponse, -) -from .chat import ( - ChatResource, - AsyncChatResource, - ChatResourceWithRawResponse, - AsyncChatResourceWithRawResponse, - ChatResourceWithStreamingResponse, - AsyncChatResourceWithStreamingResponse, -) -from .agents import ( - AgentsResource, - AsyncAgentsResource, - AgentsResourceWithRawResponse, - AsyncAgentsResourceWithRawResponse, - AgentsResourceWithStreamingResponse, - AsyncAgentsResourceWithStreamingResponse, -) -from .models import ( - ModelsResource, - AsyncModelsResource, - ModelsResourceWithRawResponse, - AsyncModelsResourceWithRawResponse, - ModelsResourceWithStreamingResponse, - AsyncModelsResourceWithStreamingResponse, -) -from .regions import ( - RegionsResource, - AsyncRegionsResource, - RegionsResourceWithRawResponse, - AsyncRegionsResourceWithRawResponse, - RegionsResourceWithStreamingResponse, - AsyncRegionsResourceWithStreamingResponse, -) -from .api_keys 
import ( - APIKeysResource, - AsyncAPIKeysResource, - APIKeysResourceWithRawResponse, - AsyncAPIKeysResourceWithRawResponse, - APIKeysResourceWithStreamingResponse, - AsyncAPIKeysResourceWithStreamingResponse, -) -from .providers import ( - ProvidersResource, - AsyncProvidersResource, - ProvidersResourceWithRawResponse, - AsyncProvidersResourceWithRawResponse, - ProvidersResourceWithStreamingResponse, - AsyncProvidersResourceWithStreamingResponse, -) -from .embeddings import ( - EmbeddingsResource, - AsyncEmbeddingsResource, - EmbeddingsResourceWithRawResponse, - AsyncEmbeddingsResourceWithRawResponse, - EmbeddingsResourceWithStreamingResponse, - AsyncEmbeddingsResourceWithStreamingResponse, -) -from .indexing_jobs import ( - IndexingJobsResource, - AsyncIndexingJobsResource, - IndexingJobsResourceWithRawResponse, - AsyncIndexingJobsResourceWithRawResponse, - IndexingJobsResourceWithStreamingResponse, - AsyncIndexingJobsResourceWithStreamingResponse, -) -from .knowledge_bases import ( - KnowledgeBasesResource, - AsyncKnowledgeBasesResource, - KnowledgeBasesResourceWithRawResponse, - AsyncKnowledgeBasesResourceWithRawResponse, - KnowledgeBasesResourceWithStreamingResponse, - AsyncKnowledgeBasesResourceWithStreamingResponse, -) - -__all__ = [ - "AgentsResource", - "AsyncAgentsResource", - "AgentsResourceWithRawResponse", - "AsyncAgentsResourceWithRawResponse", - "AgentsResourceWithStreamingResponse", - "AsyncAgentsResourceWithStreamingResponse", - "ProvidersResource", - "AsyncProvidersResource", - "ProvidersResourceWithRawResponse", - "AsyncProvidersResourceWithRawResponse", - "ProvidersResourceWithStreamingResponse", - "AsyncProvidersResourceWithStreamingResponse", - "AuthResource", - "AsyncAuthResource", - "AuthResourceWithRawResponse", - "AsyncAuthResourceWithRawResponse", - "AuthResourceWithStreamingResponse", - "AsyncAuthResourceWithStreamingResponse", - "RegionsResource", - "AsyncRegionsResource", - "RegionsResourceWithRawResponse", - 
"AsyncRegionsResourceWithRawResponse", - "RegionsResourceWithStreamingResponse", - "AsyncRegionsResourceWithStreamingResponse", - "IndexingJobsResource", - "AsyncIndexingJobsResource", - "IndexingJobsResourceWithRawResponse", - "AsyncIndexingJobsResourceWithRawResponse", - "IndexingJobsResourceWithStreamingResponse", - "AsyncIndexingJobsResourceWithStreamingResponse", - "KnowledgeBasesResource", - "AsyncKnowledgeBasesResource", - "KnowledgeBasesResourceWithRawResponse", - "AsyncKnowledgeBasesResourceWithRawResponse", - "KnowledgeBasesResourceWithStreamingResponse", - "AsyncKnowledgeBasesResourceWithStreamingResponse", - "APIKeysResource", - "AsyncAPIKeysResource", - "APIKeysResourceWithRawResponse", - "AsyncAPIKeysResourceWithRawResponse", - "APIKeysResourceWithStreamingResponse", - "AsyncAPIKeysResourceWithStreamingResponse", - "ChatResource", - "AsyncChatResource", - "ChatResourceWithRawResponse", - "AsyncChatResourceWithRawResponse", - "ChatResourceWithStreamingResponse", - "AsyncChatResourceWithStreamingResponse", - "EmbeddingsResource", - "AsyncEmbeddingsResource", - "EmbeddingsResourceWithRawResponse", - "AsyncEmbeddingsResourceWithRawResponse", - "EmbeddingsResourceWithStreamingResponse", - "AsyncEmbeddingsResourceWithStreamingResponse", - "ModelsResource", - "AsyncModelsResource", - "ModelsResourceWithRawResponse", - "AsyncModelsResourceWithRawResponse", - "ModelsResourceWithStreamingResponse", - "AsyncModelsResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/agents/__init__.py b/src/digitalocean_genai_sdk/resources/agents/__init__.py deleted file mode 100644 index f41a0408..00000000 --- a/src/digitalocean_genai_sdk/resources/agents/__init__.py +++ /dev/null @@ -1,89 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .agents import ( - AgentsResource, - AsyncAgentsResource, - AgentsResourceWithRawResponse, - AsyncAgentsResourceWithRawResponse, - AgentsResourceWithStreamingResponse, - AsyncAgentsResourceWithStreamingResponse, -) -from .api_keys import ( - APIKeysResource, - AsyncAPIKeysResource, - APIKeysResourceWithRawResponse, - AsyncAPIKeysResourceWithRawResponse, - APIKeysResourceWithStreamingResponse, - AsyncAPIKeysResourceWithStreamingResponse, -) -from .versions import ( - VersionsResource, - AsyncVersionsResource, - VersionsResourceWithRawResponse, - AsyncVersionsResourceWithRawResponse, - VersionsResourceWithStreamingResponse, - AsyncVersionsResourceWithStreamingResponse, -) -from .functions import ( - FunctionsResource, - AsyncFunctionsResource, - FunctionsResourceWithRawResponse, - AsyncFunctionsResourceWithRawResponse, - FunctionsResourceWithStreamingResponse, - AsyncFunctionsResourceWithStreamingResponse, -) -from .child_agents import ( - ChildAgentsResource, - AsyncChildAgentsResource, - ChildAgentsResourceWithRawResponse, - AsyncChildAgentsResourceWithRawResponse, - ChildAgentsResourceWithStreamingResponse, - AsyncChildAgentsResourceWithStreamingResponse, -) -from .knowledge_bases import ( - KnowledgeBasesResource, - AsyncKnowledgeBasesResource, - KnowledgeBasesResourceWithRawResponse, - AsyncKnowledgeBasesResourceWithRawResponse, - KnowledgeBasesResourceWithStreamingResponse, - AsyncKnowledgeBasesResourceWithStreamingResponse, -) - -__all__ = [ - "APIKeysResource", - "AsyncAPIKeysResource", - "APIKeysResourceWithRawResponse", - "AsyncAPIKeysResourceWithRawResponse", - "APIKeysResourceWithStreamingResponse", - "AsyncAPIKeysResourceWithStreamingResponse", - "FunctionsResource", - "AsyncFunctionsResource", - "FunctionsResourceWithRawResponse", - "AsyncFunctionsResourceWithRawResponse", - "FunctionsResourceWithStreamingResponse", - "AsyncFunctionsResourceWithStreamingResponse", - "VersionsResource", - "AsyncVersionsResource", - 
"VersionsResourceWithRawResponse", - "AsyncVersionsResourceWithRawResponse", - "VersionsResourceWithStreamingResponse", - "AsyncVersionsResourceWithStreamingResponse", - "KnowledgeBasesResource", - "AsyncKnowledgeBasesResource", - "KnowledgeBasesResourceWithRawResponse", - "AsyncKnowledgeBasesResourceWithRawResponse", - "KnowledgeBasesResourceWithStreamingResponse", - "AsyncKnowledgeBasesResourceWithStreamingResponse", - "ChildAgentsResource", - "AsyncChildAgentsResource", - "ChildAgentsResourceWithRawResponse", - "AsyncChildAgentsResourceWithRawResponse", - "ChildAgentsResourceWithStreamingResponse", - "AsyncChildAgentsResourceWithStreamingResponse", - "AgentsResource", - "AsyncAgentsResource", - "AgentsResourceWithRawResponse", - "AsyncAgentsResourceWithRawResponse", - "AgentsResourceWithStreamingResponse", - "AsyncAgentsResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/agents/agents.py b/src/digitalocean_genai_sdk/resources/agents/agents.py deleted file mode 100644 index 6d3ce525..00000000 --- a/src/digitalocean_genai_sdk/resources/agents/agents.py +++ /dev/null @@ -1,965 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import List - -import httpx - -from ...types import ( - APIRetrievalMethod, - APIDeploymentVisibility, - agent_list_params, - agent_create_params, - agent_update_params, - agent_update_status_params, -) -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from .api_keys import ( - APIKeysResource, - AsyncAPIKeysResource, - APIKeysResourceWithRawResponse, - AsyncAPIKeysResourceWithRawResponse, - APIKeysResourceWithStreamingResponse, - AsyncAPIKeysResourceWithStreamingResponse, -) -from .versions import ( - VersionsResource, - AsyncVersionsResource, - VersionsResourceWithRawResponse, - AsyncVersionsResourceWithRawResponse, - VersionsResourceWithStreamingResponse, - AsyncVersionsResourceWithStreamingResponse, -) -from ..._compat import cached_property -from .functions import ( - FunctionsResource, - AsyncFunctionsResource, - FunctionsResourceWithRawResponse, - AsyncFunctionsResourceWithRawResponse, - FunctionsResourceWithStreamingResponse, - AsyncFunctionsResourceWithStreamingResponse, -) -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .child_agents import ( - ChildAgentsResource, - AsyncChildAgentsResource, - ChildAgentsResourceWithRawResponse, - AsyncChildAgentsResourceWithRawResponse, - ChildAgentsResourceWithStreamingResponse, - AsyncChildAgentsResourceWithStreamingResponse, -) -from ..._base_client import make_request_options -from .knowledge_bases import ( - KnowledgeBasesResource, - AsyncKnowledgeBasesResource, - KnowledgeBasesResourceWithRawResponse, - AsyncKnowledgeBasesResourceWithRawResponse, - KnowledgeBasesResourceWithStreamingResponse, - AsyncKnowledgeBasesResourceWithStreamingResponse, -) -from ...types.agent_list_response import AgentListResponse 
-from ...types.api_retrieval_method import APIRetrievalMethod -from ...types.agent_create_response import AgentCreateResponse -from ...types.agent_delete_response import AgentDeleteResponse -from ...types.agent_update_response import AgentUpdateResponse -from ...types.agent_retrieve_response import AgentRetrieveResponse -from ...types.api_deployment_visibility import APIDeploymentVisibility -from ...types.agent_update_status_response import AgentUpdateStatusResponse - -__all__ = ["AgentsResource", "AsyncAgentsResource"] - - -class AgentsResource(SyncAPIResource): - @cached_property - def api_keys(self) -> APIKeysResource: - return APIKeysResource(self._client) - - @cached_property - def functions(self) -> FunctionsResource: - return FunctionsResource(self._client) - - @cached_property - def versions(self) -> VersionsResource: - return VersionsResource(self._client) - - @cached_property - def knowledge_bases(self) -> KnowledgeBasesResource: - return KnowledgeBasesResource(self._client) - - @cached_property - def child_agents(self) -> ChildAgentsResource: - return ChildAgentsResource(self._client) - - @cached_property - def with_raw_response(self) -> AgentsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AgentsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AgentsResourceWithStreamingResponse(self) - - def create( - self, - *, - anthropic_key_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - instruction: str | NotGiven = NOT_GIVEN, - knowledge_base_uuid: List[str] | NotGiven = NOT_GIVEN, - model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - openai_key_uuid: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentCreateResponse: - """To create a new agent, send a POST request to `/v2/gen-ai/agents`. - - The response - body contains a JSON object with the newly created agent object. - - Args: - instruction: Agent instruction. Instructions help your agent to perform its job effectively. - See - [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) - for best practices. - - model_uuid: Identifier for the foundation model. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/v2/genai/agents", - body=maybe_transform( - { - "anthropic_key_uuid": anthropic_key_uuid, - "description": description, - "instruction": instruction, - "knowledge_base_uuid": knowledge_base_uuid, - "model_uuid": model_uuid, - "name": name, - "openai_key_uuid": openai_key_uuid, - "project_id": project_id, - "region": region, - "tags": tags, - }, - agent_create_params.AgentCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentCreateResponse, - ) - - def retrieve( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentRetrieveResponse: - """To retrieve details of an agent, GET request to `/v2/gen-ai/agents/{uuid}`. - - The - response body is a JSON object containing the agent. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return self._get( - f"/v2/genai/agents/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentRetrieveResponse, - ) - - def update( - self, - path_uuid: str, - *, - anthropic_key_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - instruction: str | NotGiven = NOT_GIVEN, - k: int | NotGiven = NOT_GIVEN, - max_tokens: int | NotGiven = NOT_GIVEN, - model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - openai_key_uuid: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - provide_citations: bool | NotGiven = NOT_GIVEN, - retrieval_method: APIRetrievalMethod | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - top_p: float | NotGiven = NOT_GIVEN, - body_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentUpdateResponse: - """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. - - The - response body is a JSON object containing the agent. - - Args: - instruction: Agent instruction. Instructions help your agent to perform its job effectively. 
- See - [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) - for best practices. - - max_tokens: Specifies the maximum number of tokens the model can process in a single input - or output, set as a number between 1 and 512. This determines the length of each - response. - - model_uuid: Identifier for the foundation model. - - temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower - values produce more predictable and conservative responses, while higher values - encourage creativity and variation. - - top_p: Defines the cumulative probability threshold for word selection, specified as a - number between 0 and 1. Higher values allow for more diverse outputs, while - lower values ensure focused and coherent responses. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_uuid: - raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") - return self._put( - f"/v2/genai/agents/{path_uuid}", - body=maybe_transform( - { - "anthropic_key_uuid": anthropic_key_uuid, - "description": description, - "instruction": instruction, - "k": k, - "max_tokens": max_tokens, - "model_uuid": model_uuid, - "name": name, - "openai_key_uuid": openai_key_uuid, - "project_id": project_id, - "provide_citations": provide_citations, - "retrieval_method": retrieval_method, - "tags": tags, - "temperature": temperature, - "top_p": top_p, - "body_uuid": body_uuid, - }, - agent_update_params.AgentUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentUpdateResponse, - ) - - def list( - self, - *, - only_deployed: bool | NotGiven 
= NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentListResponse: - """ - To list all agents, send a GET request to `/v2/gen-ai/agents`. - - Args: - only_deployed: only list agents that are deployed. - - page: page number. - - per_page: items per page. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/genai/agents", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "only_deployed": only_deployed, - "page": page, - "per_page": per_page, - }, - agent_list_params.AgentListParams, - ), - ), - cast_to=AgentListResponse, - ) - - def delete( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentDeleteResponse: - """ - To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return self._delete( - f"/v2/genai/agents/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentDeleteResponse, - ) - - def update_status( - self, - path_uuid: str, - *, - body_uuid: str | NotGiven = NOT_GIVEN, - visibility: APIDeploymentVisibility | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentUpdateStatusResponse: - """Check whether an agent is public or private. - - To update the agent status, send a - PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_uuid: - raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") - return self._put( - f"/v2/genai/agents/{path_uuid}/deployment_visibility", - body=maybe_transform( - { - "body_uuid": body_uuid, - "visibility": visibility, - }, - agent_update_status_params.AgentUpdateStatusParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentUpdateStatusResponse, - ) - - -class AsyncAgentsResource(AsyncAPIResource): - @cached_property - def api_keys(self) -> AsyncAPIKeysResource: - return AsyncAPIKeysResource(self._client) - - @cached_property - def functions(self) -> AsyncFunctionsResource: - return AsyncFunctionsResource(self._client) - - @cached_property - def versions(self) -> AsyncVersionsResource: - return AsyncVersionsResource(self._client) - - @cached_property - def knowledge_bases(self) -> AsyncKnowledgeBasesResource: - return AsyncKnowledgeBasesResource(self._client) - - @cached_property - def child_agents(self) -> AsyncChildAgentsResource: - return AsyncChildAgentsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAgentsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAgentsResourceWithStreamingResponse(self) - - async def create( - self, - *, - anthropic_key_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - instruction: str | NotGiven = NOT_GIVEN, - knowledge_base_uuid: List[str] | NotGiven = NOT_GIVEN, - model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - openai_key_uuid: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentCreateResponse: - """To create a new agent, send a POST request to `/v2/gen-ai/agents`. - - The response - body contains a JSON object with the newly created agent object. - - Args: - instruction: Agent instruction. Instructions help your agent to perform its job effectively. - See - [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) - for best practices. - - model_uuid: Identifier for the foundation model. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/v2/genai/agents", - body=await async_maybe_transform( - { - "anthropic_key_uuid": anthropic_key_uuid, - "description": description, - "instruction": instruction, - "knowledge_base_uuid": knowledge_base_uuid, - "model_uuid": model_uuid, - "name": name, - "openai_key_uuid": openai_key_uuid, - "project_id": project_id, - "region": region, - "tags": tags, - }, - agent_create_params.AgentCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentCreateResponse, - ) - - async def retrieve( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentRetrieveResponse: - """To retrieve details of an agent, GET request to `/v2/gen-ai/agents/{uuid}`. - - The - response body is a JSON object containing the agent. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return await self._get( - f"/v2/genai/agents/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentRetrieveResponse, - ) - - async def update( - self, - path_uuid: str, - *, - anthropic_key_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - instruction: str | NotGiven = NOT_GIVEN, - k: int | NotGiven = NOT_GIVEN, - max_tokens: int | NotGiven = NOT_GIVEN, - model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - openai_key_uuid: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - provide_citations: bool | NotGiven = NOT_GIVEN, - retrieval_method: APIRetrievalMethod | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - top_p: float | NotGiven = NOT_GIVEN, - body_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentUpdateResponse: - """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. - - The - response body is a JSON object containing the agent. - - Args: - instruction: Agent instruction. Instructions help your agent to perform its job effectively. 
- See - [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) - for best practices. - - max_tokens: Specifies the maximum number of tokens the model can process in a single input - or output, set as a number between 1 and 512. This determines the length of each - response. - - model_uuid: Identifier for the foundation model. - - temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower - values produce more predictable and conservative responses, while higher values - encourage creativity and variation. - - top_p: Defines the cumulative probability threshold for word selection, specified as a - number between 0 and 1. Higher values allow for more diverse outputs, while - lower values ensure focused and coherent responses. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_uuid: - raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") - return await self._put( - f"/v2/genai/agents/{path_uuid}", - body=await async_maybe_transform( - { - "anthropic_key_uuid": anthropic_key_uuid, - "description": description, - "instruction": instruction, - "k": k, - "max_tokens": max_tokens, - "model_uuid": model_uuid, - "name": name, - "openai_key_uuid": openai_key_uuid, - "project_id": project_id, - "provide_citations": provide_citations, - "retrieval_method": retrieval_method, - "tags": tags, - "temperature": temperature, - "top_p": top_p, - "body_uuid": body_uuid, - }, - agent_update_params.AgentUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentUpdateResponse, - ) - - async def list( - self, - *, - 
only_deployed: bool | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentListResponse: - """ - To list all agents, send a GET request to `/v2/gen-ai/agents`. - - Args: - only_deployed: only list agents that are deployed. - - page: page number. - - per_page: items per page. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/genai/agents", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "only_deployed": only_deployed, - "page": page, - "per_page": per_page, - }, - agent_list_params.AgentListParams, - ), - ), - cast_to=AgentListResponse, - ) - - async def delete( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentDeleteResponse: - """ - To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return await self._delete( - f"/v2/genai/agents/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentDeleteResponse, - ) - - async def update_status( - self, - path_uuid: str, - *, - body_uuid: str | NotGiven = NOT_GIVEN, - visibility: APIDeploymentVisibility | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentUpdateStatusResponse: - """Check whether an agent is public or private. - - To update the agent status, send a - PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_uuid: - raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") - return await self._put( - f"/v2/genai/agents/{path_uuid}/deployment_visibility", - body=await async_maybe_transform( - { - "body_uuid": body_uuid, - "visibility": visibility, - }, - agent_update_status_params.AgentUpdateStatusParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AgentUpdateStatusResponse, - ) - - -class AgentsResourceWithRawResponse: - def __init__(self, agents: AgentsResource) -> None: - self._agents = agents - - self.create = to_raw_response_wrapper( - agents.create, - ) - self.retrieve = to_raw_response_wrapper( - agents.retrieve, - ) - self.update = to_raw_response_wrapper( - agents.update, - ) - self.list = to_raw_response_wrapper( - agents.list, - ) - self.delete = to_raw_response_wrapper( - agents.delete, - ) - self.update_status = to_raw_response_wrapper( - agents.update_status, - ) - - @cached_property - def api_keys(self) -> APIKeysResourceWithRawResponse: - return APIKeysResourceWithRawResponse(self._agents.api_keys) - - @cached_property - def functions(self) -> FunctionsResourceWithRawResponse: - return FunctionsResourceWithRawResponse(self._agents.functions) - - @cached_property - def versions(self) -> VersionsResourceWithRawResponse: - return VersionsResourceWithRawResponse(self._agents.versions) - - @cached_property - def knowledge_bases(self) -> KnowledgeBasesResourceWithRawResponse: - return KnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) - - @cached_property - def child_agents(self) -> ChildAgentsResourceWithRawResponse: - return 
ChildAgentsResourceWithRawResponse(self._agents.child_agents) - - -class AsyncAgentsResourceWithRawResponse: - def __init__(self, agents: AsyncAgentsResource) -> None: - self._agents = agents - - self.create = async_to_raw_response_wrapper( - agents.create, - ) - self.retrieve = async_to_raw_response_wrapper( - agents.retrieve, - ) - self.update = async_to_raw_response_wrapper( - agents.update, - ) - self.list = async_to_raw_response_wrapper( - agents.list, - ) - self.delete = async_to_raw_response_wrapper( - agents.delete, - ) - self.update_status = async_to_raw_response_wrapper( - agents.update_status, - ) - - @cached_property - def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: - return AsyncAPIKeysResourceWithRawResponse(self._agents.api_keys) - - @cached_property - def functions(self) -> AsyncFunctionsResourceWithRawResponse: - return AsyncFunctionsResourceWithRawResponse(self._agents.functions) - - @cached_property - def versions(self) -> AsyncVersionsResourceWithRawResponse: - return AsyncVersionsResourceWithRawResponse(self._agents.versions) - - @cached_property - def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithRawResponse: - return AsyncKnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) - - @cached_property - def child_agents(self) -> AsyncChildAgentsResourceWithRawResponse: - return AsyncChildAgentsResourceWithRawResponse(self._agents.child_agents) - - -class AgentsResourceWithStreamingResponse: - def __init__(self, agents: AgentsResource) -> None: - self._agents = agents - - self.create = to_streamed_response_wrapper( - agents.create, - ) - self.retrieve = to_streamed_response_wrapper( - agents.retrieve, - ) - self.update = to_streamed_response_wrapper( - agents.update, - ) - self.list = to_streamed_response_wrapper( - agents.list, - ) - self.delete = to_streamed_response_wrapper( - agents.delete, - ) - self.update_status = to_streamed_response_wrapper( - agents.update_status, - ) - - @cached_property - def 
api_keys(self) -> APIKeysResourceWithStreamingResponse: - return APIKeysResourceWithStreamingResponse(self._agents.api_keys) - - @cached_property - def functions(self) -> FunctionsResourceWithStreamingResponse: - return FunctionsResourceWithStreamingResponse(self._agents.functions) - - @cached_property - def versions(self) -> VersionsResourceWithStreamingResponse: - return VersionsResourceWithStreamingResponse(self._agents.versions) - - @cached_property - def knowledge_bases(self) -> KnowledgeBasesResourceWithStreamingResponse: - return KnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) - - @cached_property - def child_agents(self) -> ChildAgentsResourceWithStreamingResponse: - return ChildAgentsResourceWithStreamingResponse(self._agents.child_agents) - - -class AsyncAgentsResourceWithStreamingResponse: - def __init__(self, agents: AsyncAgentsResource) -> None: - self._agents = agents - - self.create = async_to_streamed_response_wrapper( - agents.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - agents.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - agents.update, - ) - self.list = async_to_streamed_response_wrapper( - agents.list, - ) - self.delete = async_to_streamed_response_wrapper( - agents.delete, - ) - self.update_status = async_to_streamed_response_wrapper( - agents.update_status, - ) - - @cached_property - def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: - return AsyncAPIKeysResourceWithStreamingResponse(self._agents.api_keys) - - @cached_property - def functions(self) -> AsyncFunctionsResourceWithStreamingResponse: - return AsyncFunctionsResourceWithStreamingResponse(self._agents.functions) - - @cached_property - def versions(self) -> AsyncVersionsResourceWithStreamingResponse: - return AsyncVersionsResourceWithStreamingResponse(self._agents.versions) - - @cached_property - def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: - return 
AsyncKnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) - - @cached_property - def child_agents(self) -> AsyncChildAgentsResourceWithStreamingResponse: - return AsyncChildAgentsResourceWithStreamingResponse(self._agents.child_agents) diff --git a/src/digitalocean_genai_sdk/resources/agents/api_keys.py b/src/digitalocean_genai_sdk/resources/agents/api_keys.py deleted file mode 100644 index 451f5cb5..00000000 --- a/src/digitalocean_genai_sdk/resources/agents/api_keys.py +++ /dev/null @@ -1,581 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.agents import api_key_list_params, api_key_create_params, api_key_update_params -from ...types.agents.api_key_list_response import APIKeyListResponse -from ...types.agents.api_key_create_response import APIKeyCreateResponse -from ...types.agents.api_key_delete_response import APIKeyDeleteResponse -from ...types.agents.api_key_update_response import APIKeyUpdateResponse -from ...types.agents.api_key_regenerate_response import APIKeyRegenerateResponse - -__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] - - -class APIKeysResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> APIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return APIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return APIKeysResourceWithStreamingResponse(self) - - def create( - self, - path_agent_uuid: str, - *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyCreateResponse: - """ - To create an agent API key, send a POST request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - return self._post( - f"/v2/genai/agents/{path_agent_uuid}/api_keys", - body=maybe_transform( - { - "body_agent_uuid": body_agent_uuid, - "name": name, - }, - api_key_create_params.APIKeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyCreateResponse, - ) - - def update( - self, - path_api_key_uuid: str, - *, - path_agent_uuid: str, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyUpdateResponse: - """ - To update an agent API key, send a PUT request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return self._put( - f"/v2/genai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}", - body=maybe_transform( - { - "body_agent_uuid": body_agent_uuid, - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - api_key_update_params.APIKeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyUpdateResponse, - ) - - def list( - self, - agent_uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyListResponse: - """ - To list all agent API keys, send a GET request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - return self._get( - f"/v2/genai/agents/{agent_uuid}/api_keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - api_key_list_params.APIKeyListParams, - ), - ), - cast_to=APIKeyListResponse, - ) - - def delete( - self, - api_key_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyDeleteResponse: - """ - To delete an API key for an agent, send a DELETE request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._delete( - f"/v2/genai/agents/{agent_uuid}/api_keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyDeleteResponse, - ) - - def regenerate( - self, - api_key_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyRegenerateResponse: - """ - To regenerate an agent API key, send a PUT request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._put( - f"/v2/genai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyRegenerateResponse, - ) - - -class AsyncAPIKeysResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAPIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAPIKeysResourceWithStreamingResponse(self) - - async def create( - self, - path_agent_uuid: str, - *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyCreateResponse: - """ - To create an agent API key, send a POST request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - return await self._post( - f"/v2/genai/agents/{path_agent_uuid}/api_keys", - body=await async_maybe_transform( - { - "body_agent_uuid": body_agent_uuid, - "name": name, - }, - api_key_create_params.APIKeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyCreateResponse, - ) - - async def update( - self, - path_api_key_uuid: str, - *, - path_agent_uuid: str, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyUpdateResponse: - """ - To update an agent API key, send a PUT request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return await self._put( - f"/v2/genai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}", - body=await async_maybe_transform( - { - "body_agent_uuid": body_agent_uuid, - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - api_key_update_params.APIKeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyUpdateResponse, - ) - - async def list( - self, - agent_uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyListResponse: - """ - To list all agent API keys, send a GET request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - return await self._get( - f"/v2/genai/agents/{agent_uuid}/api_keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - api_key_list_params.APIKeyListParams, - ), - ), - cast_to=APIKeyListResponse, - ) - - async def delete( - self, - api_key_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyDeleteResponse: - """ - To delete an API key for an agent, send a DELETE request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._delete( - f"/v2/genai/agents/{agent_uuid}/api_keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyDeleteResponse, - ) - - async def regenerate( - self, - api_key_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyRegenerateResponse: - """ - To regenerate an agent API key, send a PUT request to - `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._put( - f"/v2/genai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyRegenerateResponse, - ) - - -class APIKeysResourceWithRawResponse: - def __init__(self, api_keys: APIKeysResource) -> None: - self._api_keys = api_keys - - self.create = to_raw_response_wrapper( - api_keys.create, - ) - self.update = to_raw_response_wrapper( - api_keys.update, - ) - self.list = to_raw_response_wrapper( - api_keys.list, - ) - self.delete = to_raw_response_wrapper( - api_keys.delete, - ) - self.regenerate = to_raw_response_wrapper( - api_keys.regenerate, - ) - - -class AsyncAPIKeysResourceWithRawResponse: - def __init__(self, api_keys: AsyncAPIKeysResource) -> None: - self._api_keys = api_keys - - self.create = async_to_raw_response_wrapper( - api_keys.create, - ) - self.update = async_to_raw_response_wrapper( - api_keys.update, - ) - self.list = async_to_raw_response_wrapper( - api_keys.list, - ) - self.delete = async_to_raw_response_wrapper( - api_keys.delete, - ) - self.regenerate = async_to_raw_response_wrapper( - api_keys.regenerate, - ) - - -class APIKeysResourceWithStreamingResponse: - def __init__(self, api_keys: APIKeysResource) -> None: - self._api_keys = api_keys - - self.create = to_streamed_response_wrapper( - api_keys.create, - ) - self.update = to_streamed_response_wrapper( - api_keys.update, - ) - 
self.list = to_streamed_response_wrapper( - api_keys.list, - ) - self.delete = to_streamed_response_wrapper( - api_keys.delete, - ) - self.regenerate = to_streamed_response_wrapper( - api_keys.regenerate, - ) - - -class AsyncAPIKeysResourceWithStreamingResponse: - def __init__(self, api_keys: AsyncAPIKeysResource) -> None: - self._api_keys = api_keys - - self.create = async_to_streamed_response_wrapper( - api_keys.create, - ) - self.update = async_to_streamed_response_wrapper( - api_keys.update, - ) - self.list = async_to_streamed_response_wrapper( - api_keys.list, - ) - self.delete = async_to_streamed_response_wrapper( - api_keys.delete, - ) - self.regenerate = async_to_streamed_response_wrapper( - api_keys.regenerate, - ) diff --git a/src/digitalocean_genai_sdk/resources/agents/child_agents.py b/src/digitalocean_genai_sdk/resources/agents/child_agents.py deleted file mode 100644 index 7d4ed3bb..00000000 --- a/src/digitalocean_genai_sdk/resources/agents/child_agents.py +++ /dev/null @@ -1,508 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.agents import child_agent_add_params, child_agent_update_params -from ...types.agents.child_agent_add_response import ChildAgentAddResponse -from ...types.agents.child_agent_view_response import ChildAgentViewResponse -from ...types.agents.child_agent_delete_response import ChildAgentDeleteResponse -from ...types.agents.child_agent_update_response import ChildAgentUpdateResponse - -__all__ = ["ChildAgentsResource", "AsyncChildAgentsResource"] - - -class ChildAgentsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ChildAgentsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return ChildAgentsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ChildAgentsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return ChildAgentsResourceWithStreamingResponse(self) - - def update( - self, - path_child_agent_uuid: str, - *, - path_parent_agent_uuid: str, - body_child_agent_uuid: str | NotGiven = NOT_GIVEN, - if_case: str | NotGiven = NOT_GIVEN, - body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, - route_name: str | NotGiven = NOT_GIVEN, - uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentUpdateResponse: - """ - To update an agent route for an agent, send a PUT request to - `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. - - Args: - body_parent_agent_uuid: A unique identifier for the parent agent. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_parent_agent_uuid: - raise ValueError( - f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" - ) - if not path_child_agent_uuid: - raise ValueError( - f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" - ) - return self._put( - f"/v2/genai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", - body=maybe_transform( - { - "body_child_agent_uuid": body_child_agent_uuid, - "if_case": if_case, - "body_parent_agent_uuid": body_parent_agent_uuid, - "route_name": route_name, - "uuid": uuid, - }, - child_agent_update_params.ChildAgentUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChildAgentUpdateResponse, - ) - - def delete( - self, - child_agent_uuid: str, - *, - parent_agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentDeleteResponse: - """ - To delete an agent route from a parent agent, send a DELETE request to - `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not parent_agent_uuid: - raise ValueError(f"Expected a non-empty value for `parent_agent_uuid` but received {parent_agent_uuid!r}") - if not child_agent_uuid: - raise ValueError(f"Expected a non-empty value for `child_agent_uuid` but received {child_agent_uuid!r}") - return self._delete( - f"/v2/genai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChildAgentDeleteResponse, - ) - - def add( - self, - path_child_agent_uuid: str, - *, - path_parent_agent_uuid: str, - body_child_agent_uuid: str | NotGiven = NOT_GIVEN, - if_case: str | NotGiven = NOT_GIVEN, - body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, - route_name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentAddResponse: - """ - To add an agent route to an agent, send a POST request to - `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. - - Args: - body_parent_agent_uuid: A unique identifier for the parent agent. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_parent_agent_uuid: - raise ValueError( - f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" - ) - if not path_child_agent_uuid: - raise ValueError( - f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" - ) - return self._post( - f"/v2/genai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", - body=maybe_transform( - { - "body_child_agent_uuid": body_child_agent_uuid, - "if_case": if_case, - "body_parent_agent_uuid": body_parent_agent_uuid, - "route_name": route_name, - }, - child_agent_add_params.ChildAgentAddParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChildAgentAddResponse, - ) - - def view( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentViewResponse: - """ - To view agent routes for an agent, send a GET requtest to - `/v2/gen-ai/agents/{uuid}/child_agents`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return self._get( - f"/v2/genai/agents/{uuid}/child_agents", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChildAgentViewResponse, - ) - - -class AsyncChildAgentsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncChildAgentsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncChildAgentsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncChildAgentsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncChildAgentsResourceWithStreamingResponse(self) - - async def update( - self, - path_child_agent_uuid: str, - *, - path_parent_agent_uuid: str, - body_child_agent_uuid: str | NotGiven = NOT_GIVEN, - if_case: str | NotGiven = NOT_GIVEN, - body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, - route_name: str | NotGiven = NOT_GIVEN, - uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentUpdateResponse: - """ - To update an agent route for an agent, send a PUT request to - `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. - - Args: - body_parent_agent_uuid: A unique identifier for the parent agent. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_parent_agent_uuid: - raise ValueError( - f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" - ) - if not path_child_agent_uuid: - raise ValueError( - f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" - ) - return await self._put( - f"/v2/genai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", - body=await async_maybe_transform( - { - "body_child_agent_uuid": body_child_agent_uuid, - "if_case": if_case, - "body_parent_agent_uuid": body_parent_agent_uuid, - "route_name": route_name, - "uuid": uuid, - }, - child_agent_update_params.ChildAgentUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChildAgentUpdateResponse, - ) - - async def delete( - self, - child_agent_uuid: str, - *, - parent_agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentDeleteResponse: - """ - To delete an agent route from a parent agent, send a DELETE request to - `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not parent_agent_uuid: - raise ValueError(f"Expected a non-empty value for `parent_agent_uuid` but received {parent_agent_uuid!r}") - if not child_agent_uuid: - raise ValueError(f"Expected a non-empty value for `child_agent_uuid` but received {child_agent_uuid!r}") - return await self._delete( - f"/v2/genai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChildAgentDeleteResponse, - ) - - async def add( - self, - path_child_agent_uuid: str, - *, - path_parent_agent_uuid: str, - body_child_agent_uuid: str | NotGiven = NOT_GIVEN, - if_case: str | NotGiven = NOT_GIVEN, - body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, - route_name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentAddResponse: - """ - To add an agent route to an agent, send a POST request to - `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. - - Args: - body_parent_agent_uuid: A unique identifier for the parent agent. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_parent_agent_uuid: - raise ValueError( - f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" - ) - if not path_child_agent_uuid: - raise ValueError( - f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" - ) - return await self._post( - f"/v2/genai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", - body=await async_maybe_transform( - { - "body_child_agent_uuid": body_child_agent_uuid, - "if_case": if_case, - "body_parent_agent_uuid": body_parent_agent_uuid, - "route_name": route_name, - }, - child_agent_add_params.ChildAgentAddParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChildAgentAddResponse, - ) - - async def view( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChildAgentViewResponse: - """ - To view agent routes for an agent, send a GET requtest to - `/v2/gen-ai/agents/{uuid}/child_agents`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return await self._get( - f"/v2/genai/agents/{uuid}/child_agents", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChildAgentViewResponse, - ) - - -class ChildAgentsResourceWithRawResponse: - def __init__(self, child_agents: ChildAgentsResource) -> None: - self._child_agents = child_agents - - self.update = to_raw_response_wrapper( - child_agents.update, - ) - self.delete = to_raw_response_wrapper( - child_agents.delete, - ) - self.add = to_raw_response_wrapper( - child_agents.add, - ) - self.view = to_raw_response_wrapper( - child_agents.view, - ) - - -class AsyncChildAgentsResourceWithRawResponse: - def __init__(self, child_agents: AsyncChildAgentsResource) -> None: - self._child_agents = child_agents - - self.update = async_to_raw_response_wrapper( - child_agents.update, - ) - self.delete = async_to_raw_response_wrapper( - child_agents.delete, - ) - self.add = async_to_raw_response_wrapper( - child_agents.add, - ) - self.view = async_to_raw_response_wrapper( - child_agents.view, - ) - - -class ChildAgentsResourceWithStreamingResponse: - def __init__(self, child_agents: ChildAgentsResource) -> None: - self._child_agents = child_agents - - self.update = to_streamed_response_wrapper( - child_agents.update, - ) 
- self.delete = to_streamed_response_wrapper( - child_agents.delete, - ) - self.add = to_streamed_response_wrapper( - child_agents.add, - ) - self.view = to_streamed_response_wrapper( - child_agents.view, - ) - - -class AsyncChildAgentsResourceWithStreamingResponse: - def __init__(self, child_agents: AsyncChildAgentsResource) -> None: - self._child_agents = child_agents - - self.update = async_to_streamed_response_wrapper( - child_agents.update, - ) - self.delete = async_to_streamed_response_wrapper( - child_agents.delete, - ) - self.add = async_to_streamed_response_wrapper( - child_agents.add, - ) - self.view = async_to_streamed_response_wrapper( - child_agents.view, - ) diff --git a/src/digitalocean_genai_sdk/resources/agents/functions.py b/src/digitalocean_genai_sdk/resources/agents/functions.py deleted file mode 100644 index 89f9efa3..00000000 --- a/src/digitalocean_genai_sdk/resources/agents/functions.py +++ /dev/null @@ -1,421 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.agents import function_create_params, function_update_params -from ...types.agents.function_create_response import FunctionCreateResponse -from ...types.agents.function_delete_response import FunctionDeleteResponse -from ...types.agents.function_update_response import FunctionUpdateResponse - -__all__ = ["FunctionsResource", "AsyncFunctionsResource"] - - -class FunctionsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> FunctionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return FunctionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> FunctionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return FunctionsResourceWithStreamingResponse(self) - - def create( - self, - path_agent_uuid: str, - *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - faas_name: str | NotGiven = NOT_GIVEN, - faas_namespace: str | NotGiven = NOT_GIVEN, - function_name: str | NotGiven = NOT_GIVEN, - input_schema: object | NotGiven = NOT_GIVEN, - output_schema: object | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FunctionCreateResponse: - """ - To create a function route for an agent, send a POST request to - `/v2/gen-ai/agents/{agent_uuid}/functions`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - return self._post( - f"/v2/genai/agents/{path_agent_uuid}/functions", - body=maybe_transform( - { - "body_agent_uuid": body_agent_uuid, - "description": description, - "faas_name": faas_name, - "faas_namespace": faas_namespace, - "function_name": function_name, - "input_schema": input_schema, - "output_schema": output_schema, - }, - function_create_params.FunctionCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FunctionCreateResponse, - ) - - def update( - self, - path_function_uuid: str, - *, - path_agent_uuid: str, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - faas_name: str | NotGiven = NOT_GIVEN, - faas_namespace: str | NotGiven = NOT_GIVEN, - function_name: str | NotGiven = NOT_GIVEN, - body_function_uuid: str | NotGiven = NOT_GIVEN, - input_schema: object | NotGiven = NOT_GIVEN, - output_schema: object | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FunctionUpdateResponse: - """ - To update the function route, send a PUT request to - `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - if not path_function_uuid: - raise ValueError(f"Expected a non-empty value for `path_function_uuid` but received {path_function_uuid!r}") - return self._put( - f"/v2/genai/agents/{path_agent_uuid}/functions/{path_function_uuid}", - body=maybe_transform( - { - "body_agent_uuid": body_agent_uuid, - "description": description, - "faas_name": faas_name, - "faas_namespace": faas_namespace, - "function_name": function_name, - "body_function_uuid": body_function_uuid, - "input_schema": input_schema, - "output_schema": output_schema, - }, - function_update_params.FunctionUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FunctionUpdateResponse, - ) - - def delete( - self, - function_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FunctionDeleteResponse: - """ - To delete a function route from an agent, send a DELETE request to - `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not function_uuid: - raise ValueError(f"Expected a non-empty value for `function_uuid` but received {function_uuid!r}") - return self._delete( - f"/v2/genai/agents/{agent_uuid}/functions/{function_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FunctionDeleteResponse, - ) - - -class AsyncFunctionsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncFunctionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncFunctionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncFunctionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncFunctionsResourceWithStreamingResponse(self) - - async def create( - self, - path_agent_uuid: str, - *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - faas_name: str | NotGiven = NOT_GIVEN, - faas_namespace: str | NotGiven = NOT_GIVEN, - function_name: str | NotGiven = NOT_GIVEN, - input_schema: object | NotGiven = NOT_GIVEN, - output_schema: object | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FunctionCreateResponse: - """ - To create a function route for an agent, send a POST request to - `/v2/gen-ai/agents/{agent_uuid}/functions`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - return await self._post( - f"/v2/genai/agents/{path_agent_uuid}/functions", - body=await async_maybe_transform( - { - "body_agent_uuid": body_agent_uuid, - "description": description, - "faas_name": faas_name, - "faas_namespace": faas_namespace, - "function_name": function_name, - "input_schema": input_schema, - "output_schema": output_schema, - }, - function_create_params.FunctionCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FunctionCreateResponse, - ) - - async def update( - self, - path_function_uuid: str, - *, - path_agent_uuid: str, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - description: str | NotGiven = NOT_GIVEN, - faas_name: str | NotGiven = NOT_GIVEN, - faas_namespace: str | NotGiven = NOT_GIVEN, - function_name: str | NotGiven = NOT_GIVEN, - body_function_uuid: str | NotGiven = NOT_GIVEN, - input_schema: object | NotGiven = NOT_GIVEN, - output_schema: object | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FunctionUpdateResponse: - """ - To update the function route, send a PUT request to - `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - if not path_function_uuid: - raise ValueError(f"Expected a non-empty value for `path_function_uuid` but received {path_function_uuid!r}") - return await self._put( - f"/v2/genai/agents/{path_agent_uuid}/functions/{path_function_uuid}", - body=await async_maybe_transform( - { - "body_agent_uuid": body_agent_uuid, - "description": description, - "faas_name": faas_name, - "faas_namespace": faas_namespace, - "function_name": function_name, - "body_function_uuid": body_function_uuid, - "input_schema": input_schema, - "output_schema": output_schema, - }, - function_update_params.FunctionUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FunctionUpdateResponse, - ) - - async def delete( - self, - function_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FunctionDeleteResponse: - """ - To delete a function route from an agent, send a DELETE request to - `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not function_uuid: - raise ValueError(f"Expected a non-empty value for `function_uuid` but received {function_uuid!r}") - return await self._delete( - f"/v2/genai/agents/{agent_uuid}/functions/{function_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FunctionDeleteResponse, - ) - - -class FunctionsResourceWithRawResponse: - def __init__(self, functions: FunctionsResource) -> None: - self._functions = functions - - self.create = to_raw_response_wrapper( - functions.create, - ) - self.update = to_raw_response_wrapper( - functions.update, - ) - self.delete = to_raw_response_wrapper( - functions.delete, - ) - - -class AsyncFunctionsResourceWithRawResponse: - def __init__(self, functions: AsyncFunctionsResource) -> None: - self._functions = functions - - self.create = async_to_raw_response_wrapper( - functions.create, - ) - self.update = async_to_raw_response_wrapper( - functions.update, - ) - self.delete = async_to_raw_response_wrapper( - functions.delete, - ) - - -class FunctionsResourceWithStreamingResponse: - def __init__(self, functions: FunctionsResource) -> None: - self._functions = functions - - self.create = to_streamed_response_wrapper( - functions.create, - ) - self.update = to_streamed_response_wrapper( - functions.update, - ) - self.delete = to_streamed_response_wrapper( - functions.delete, - ) - - -class AsyncFunctionsResourceWithStreamingResponse: - def __init__(self, functions: AsyncFunctionsResource) -> None: - self._functions = functions - - self.create = 
async_to_streamed_response_wrapper( - functions.create, - ) - self.update = async_to_streamed_response_wrapper( - functions.update, - ) - self.delete = async_to_streamed_response_wrapper( - functions.delete, - ) diff --git a/src/digitalocean_genai_sdk/resources/agents/knowledge_bases.py b/src/digitalocean_genai_sdk/resources/agents/knowledge_bases.py deleted file mode 100644 index 4a091446..00000000 --- a/src/digitalocean_genai_sdk/resources/agents/knowledge_bases.py +++ /dev/null @@ -1,346 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.agents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput -from ...types.agents.knowledge_base_detach_response import KnowledgeBaseDetachResponse - -__all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"] - - -class KnowledgeBasesResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return KnowledgeBasesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return KnowledgeBasesResourceWithStreamingResponse(self) - - def attach( - self, - agent_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APILinkKnowledgeBaseOutput: - """ - To attach knowledge bases to an agent, send a POST request to - `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases` - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - return self._post( - f"/v2/genai/agents/{agent_uuid}/knowledge_bases", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APILinkKnowledgeBaseOutput, - ) - - def attach_single( - self, - knowledge_base_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APILinkKnowledgeBaseOutput: - """ - To attach a knowledge base to an agent, send a POST request to - `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}` - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" - ) - return self._post( - f"/v2/genai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APILinkKnowledgeBaseOutput, - ) - - def detach( - self, - knowledge_base_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseDetachResponse: - """ - To detach a knowledge base from an agent, send a DELETE request to - `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" - ) - return self._delete( - f"/v2/genai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseDetachResponse, - ) - - -class AsyncKnowledgeBasesResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncKnowledgeBasesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncKnowledgeBasesResourceWithStreamingResponse(self) - - async def attach( - self, - agent_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APILinkKnowledgeBaseOutput: - """ - To attach knowledge bases to an agent, send a POST request to - `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases` - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - return await self._post( - f"/v2/genai/agents/{agent_uuid}/knowledge_bases", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APILinkKnowledgeBaseOutput, - ) - - async def attach_single( - self, - knowledge_base_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APILinkKnowledgeBaseOutput: - """ - To attach a knowledge base to an agent, send a POST request to - `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}` - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" - ) - return await self._post( - f"/v2/genai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APILinkKnowledgeBaseOutput, - ) - - async def detach( - self, - knowledge_base_uuid: str, - *, - agent_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseDetachResponse: - """ - To detach a knowledge base from an agent, send a DELETE request to - `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not agent_uuid: - raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") - if not knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" - ) - return await self._delete( - f"/v2/genai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseDetachResponse, - ) - - -class KnowledgeBasesResourceWithRawResponse: - def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: - self._knowledge_bases = knowledge_bases - - self.attach = to_raw_response_wrapper( - knowledge_bases.attach, - ) - self.attach_single = to_raw_response_wrapper( - knowledge_bases.attach_single, - ) - self.detach = to_raw_response_wrapper( - knowledge_bases.detach, - ) - - -class AsyncKnowledgeBasesResourceWithRawResponse: - def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: - self._knowledge_bases = knowledge_bases - - self.attach = async_to_raw_response_wrapper( - knowledge_bases.attach, - ) - self.attach_single = async_to_raw_response_wrapper( - knowledge_bases.attach_single, - ) - self.detach = async_to_raw_response_wrapper( - knowledge_bases.detach, - ) - - -class KnowledgeBasesResourceWithStreamingResponse: - def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: - self._knowledge_bases = knowledge_bases - - self.attach = to_streamed_response_wrapper( - knowledge_bases.attach, - ) - self.attach_single = to_streamed_response_wrapper( - knowledge_bases.attach_single, - ) - self.detach = to_streamed_response_wrapper( 
- knowledge_bases.detach, - ) - - -class AsyncKnowledgeBasesResourceWithStreamingResponse: - def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: - self._knowledge_bases = knowledge_bases - - self.attach = async_to_streamed_response_wrapper( - knowledge_bases.attach, - ) - self.attach_single = async_to_streamed_response_wrapper( - knowledge_bases.attach_single, - ) - self.detach = async_to_streamed_response_wrapper( - knowledge_bases.detach, - ) diff --git a/src/digitalocean_genai_sdk/resources/api_keys/__init__.py b/src/digitalocean_genai_sdk/resources/api_keys/__init__.py deleted file mode 100644 index ed14565c..00000000 --- a/src/digitalocean_genai_sdk/resources/api_keys/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .api_keys import ( - APIKeysResource, - AsyncAPIKeysResource, - APIKeysResourceWithRawResponse, - AsyncAPIKeysResourceWithRawResponse, - APIKeysResourceWithStreamingResponse, - AsyncAPIKeysResourceWithStreamingResponse, -) - -__all__ = [ - "APIKeysResource", - "AsyncAPIKeysResource", - "APIKeysResourceWithRawResponse", - "AsyncAPIKeysResourceWithRawResponse", - "APIKeysResourceWithStreamingResponse", - "AsyncAPIKeysResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/api_keys/api_keys.py b/src/digitalocean_genai_sdk/resources/api_keys/api_keys.py deleted file mode 100644 index 63091bcc..00000000 --- a/src/digitalocean_genai_sdk/resources/api_keys/api_keys.py +++ /dev/null @@ -1,275 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal - -import httpx - -from . 
import api_keys_ as api_keys -from ...types import api_key_list_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.api_key_list_response import APIKeyListResponse - -__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] - - -class APIKeysResource(SyncAPIResource): - @cached_property - def api_keys(self) -> api_keys.APIKeysResource: - return api_keys.APIKeysResource(self._client) - - @cached_property - def with_raw_response(self) -> APIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return APIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return APIKeysResourceWithStreamingResponse(self) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyListResponse: - """ - To list all models, send a GET request to `/v2/gen-ai/models`. - - Args: - page: page number. - - per_page: items per page. - - public_only: only include models that are publicly available. - - usecases: include only models defined for the listed usecases. 
- - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/genai/models", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - "public_only": public_only, - "usecases": usecases, - }, - api_key_list_params.APIKeyListParams, - ), - ), - cast_to=APIKeyListResponse, - ) - - -class AsyncAPIKeysResource(AsyncAPIResource): - @cached_property - def api_keys(self) -> api_keys.AsyncAPIKeysResource: - return api_keys.AsyncAPIKeysResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAPIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAPIKeysResourceWithStreamingResponse(self) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyListResponse: - """ - To list all models, send a GET request to `/v2/gen-ai/models`. - - Args: - page: page number. - - per_page: items per page. - - public_only: only include models that are publicly available. - - usecases: include only models defined for the listed usecases. 
- - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/genai/models", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - "public_only": public_only, - "usecases": usecases, - }, - api_key_list_params.APIKeyListParams, - ), - ), - cast_to=APIKeyListResponse, - ) - - -class APIKeysResourceWithRawResponse: - def __init__(self, api_keys: APIKeysResource) -> None: - self._api_keys = api_keys - - self.list = to_raw_response_wrapper( - api_keys.list, - ) - - @cached_property - def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: - return api_keys.APIKeysResourceWithRawResponse(self._api_keys.api_keys) - - -class AsyncAPIKeysResourceWithRawResponse: - def __init__(self, api_keys: AsyncAPIKeysResource) -> None: - self._api_keys = api_keys - - self.list = async_to_raw_response_wrapper( - api_keys.list, - ) - - @cached_property - def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse: - return api_keys.AsyncAPIKeysResourceWithRawResponse(self._api_keys.api_keys) - - -class APIKeysResourceWithStreamingResponse: - def __init__(self, api_keys: APIKeysResource) -> None: - self._api_keys = 
api_keys - - self.list = to_streamed_response_wrapper( - api_keys.list, - ) - - @cached_property - def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: - return api_keys.APIKeysResourceWithStreamingResponse(self._api_keys.api_keys) - - -class AsyncAPIKeysResourceWithStreamingResponse: - def __init__(self, api_keys: AsyncAPIKeysResource) -> None: - self._api_keys = api_keys - - self.list = async_to_streamed_response_wrapper( - api_keys.list, - ) - - @cached_property - def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: - return api_keys.AsyncAPIKeysResourceWithStreamingResponse(self._api_keys.api_keys) diff --git a/src/digitalocean_genai_sdk/resources/api_keys/api_keys_.py b/src/digitalocean_genai_sdk/resources/api_keys/api_keys_.py deleted file mode 100644 index 70b1147a..00000000 --- a/src/digitalocean_genai_sdk/resources/api_keys/api_keys_.py +++ /dev/null @@ -1,529 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.api_keys import api_key_list_params, api_key_create_params, api_key_update_params -from ...types.api_keys.api_key_list_response import APIKeyListResponse -from ...types.api_keys.api_key_create_response import APIKeyCreateResponse -from ...types.api_keys.api_key_delete_response import APIKeyDeleteResponse -from ...types.api_keys.api_key_update_response import APIKeyUpdateResponse -from ...types.api_keys.api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse - -__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] - - -class APIKeysResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> APIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return APIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return APIKeysResourceWithStreamingResponse(self) - - def create( - self, - *, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyCreateResponse: - """ - To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/v2/genai/models/api_keys", - body=maybe_transform({"name": name}, api_key_create_params.APIKeyCreateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyCreateResponse, - ) - - def update( - self, - path_api_key_uuid: str, - *, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyUpdateResponse: - """ - To update a model API key, send a PUT request to - `/v2/gen-ai/models/api_keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return self._put( - f"/v2/genai/models/api_keys/{path_api_key_uuid}", - body=maybe_transform( - { - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - api_key_update_params.APIKeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyUpdateResponse, - ) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyListResponse: - """ - To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/genai/models/api_keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - api_key_list_params.APIKeyListParams, - ), - ), - cast_to=APIKeyListResponse, - ) - - def delete( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyDeleteResponse: - """ - To delete an API key for a model, send a DELETE request to - `/v2/gen-ai/models/api_keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._delete( - f"/v2/genai/models/api_keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyDeleteResponse, - ) - - def update_regenerate( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyUpdateRegenerateResponse: - """ - To regenerate a model API key, send a PUT request to - `/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._put( - f"/v2/genai/models/api_keys/{api_key_uuid}/regenerate", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyUpdateRegenerateResponse, - ) - - -class AsyncAPIKeysResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAPIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAPIKeysResourceWithStreamingResponse(self) - - async def create( - self, - *, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyCreateResponse: - """ - To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/v2/genai/models/api_keys", - body=await async_maybe_transform({"name": name}, api_key_create_params.APIKeyCreateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyCreateResponse, - ) - - async def update( - self, - path_api_key_uuid: str, - *, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyUpdateResponse: - """ - To update a model API key, send a PUT request to - `/v2/gen-ai/models/api_keys/{api_key_uuid}`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return await self._put( - f"/v2/genai/models/api_keys/{path_api_key_uuid}", - body=await async_maybe_transform( - { - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - api_key_update_params.APIKeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyUpdateResponse, - ) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyListResponse: - """ - To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/genai/models/api_keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - api_key_list_params.APIKeyListParams, - ), - ), - cast_to=APIKeyListResponse, - ) - - async def delete( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyDeleteResponse: - """ - To delete an API key for a model, send a DELETE request to - `/v2/gen-ai/models/api_keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._delete( - f"/v2/genai/models/api_keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyDeleteResponse, - ) - - async def update_regenerate( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyUpdateRegenerateResponse: - """ - To regenerate a model API key, send a PUT request to - `/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._put( - f"/v2/genai/models/api_keys/{api_key_uuid}/regenerate", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=APIKeyUpdateRegenerateResponse, - ) - - -class APIKeysResourceWithRawResponse: - def __init__(self, api_keys: APIKeysResource) -> None: - self._api_keys = api_keys - - self.create = to_raw_response_wrapper( - api_keys.create, - ) - self.update = to_raw_response_wrapper( - api_keys.update, - ) - self.list = to_raw_response_wrapper( - api_keys.list, - ) - self.delete = to_raw_response_wrapper( - api_keys.delete, - ) - self.update_regenerate = to_raw_response_wrapper( - api_keys.update_regenerate, - ) - - -class AsyncAPIKeysResourceWithRawResponse: - def __init__(self, api_keys: AsyncAPIKeysResource) -> None: - self._api_keys = api_keys - - self.create = async_to_raw_response_wrapper( - api_keys.create, - ) - self.update = async_to_raw_response_wrapper( - api_keys.update, - ) - self.list = async_to_raw_response_wrapper( - api_keys.list, - ) - self.delete = async_to_raw_response_wrapper( - api_keys.delete, - ) - self.update_regenerate = async_to_raw_response_wrapper( - api_keys.update_regenerate, - ) - - -class APIKeysResourceWithStreamingResponse: - def __init__(self, api_keys: APIKeysResource) -> None: - self._api_keys = api_keys - - self.create = to_streamed_response_wrapper( - api_keys.create, - ) - self.update = to_streamed_response_wrapper( - api_keys.update, - ) - self.list = to_streamed_response_wrapper( - api_keys.list, - ) - self.delete = 
to_streamed_response_wrapper( - api_keys.delete, - ) - self.update_regenerate = to_streamed_response_wrapper( - api_keys.update_regenerate, - ) - - -class AsyncAPIKeysResourceWithStreamingResponse: - def __init__(self, api_keys: AsyncAPIKeysResource) -> None: - self._api_keys = api_keys - - self.create = async_to_streamed_response_wrapper( - api_keys.create, - ) - self.update = async_to_streamed_response_wrapper( - api_keys.update, - ) - self.list = async_to_streamed_response_wrapper( - api_keys.list, - ) - self.delete = async_to_streamed_response_wrapper( - api_keys.delete, - ) - self.update_regenerate = async_to_streamed_response_wrapper( - api_keys.update_regenerate, - ) diff --git a/src/digitalocean_genai_sdk/resources/auth/agents/__init__.py b/src/digitalocean_genai_sdk/resources/auth/agents/__init__.py deleted file mode 100644 index 2972198f..00000000 --- a/src/digitalocean_genai_sdk/resources/auth/agents/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .token import ( - TokenResource, - AsyncTokenResource, - TokenResourceWithRawResponse, - AsyncTokenResourceWithRawResponse, - TokenResourceWithStreamingResponse, - AsyncTokenResourceWithStreamingResponse, -) -from .agents import ( - AgentsResource, - AsyncAgentsResource, - AgentsResourceWithRawResponse, - AsyncAgentsResourceWithRawResponse, - AgentsResourceWithStreamingResponse, - AsyncAgentsResourceWithStreamingResponse, -) - -__all__ = [ - "TokenResource", - "AsyncTokenResource", - "TokenResourceWithRawResponse", - "AsyncTokenResourceWithRawResponse", - "TokenResourceWithStreamingResponse", - "AsyncTokenResourceWithStreamingResponse", - "AgentsResource", - "AsyncAgentsResource", - "AgentsResourceWithRawResponse", - "AsyncAgentsResourceWithRawResponse", - "AgentsResourceWithStreamingResponse", - "AsyncAgentsResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/auth/agents/token.py b/src/digitalocean_genai_sdk/resources/auth/agents/token.py deleted file mode 100644 index 73ecef05..00000000 --- a/src/digitalocean_genai_sdk/resources/auth/agents/token.py +++ /dev/null @@ -1,173 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.auth.agents import token_create_params -from ....types.auth.agents.token_create_response import TokenCreateResponse - -__all__ = ["TokenResource", "AsyncTokenResource"] - - -class TokenResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> TokenResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return TokenResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> TokenResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return TokenResourceWithStreamingResponse(self) - - def create( - self, - path_agent_uuid: str, - *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> TokenCreateResponse: - """ - To issue an agent token, send a POST request to - `/v2/gen-ai/auth/agents/{agent_uuid}/token`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - return self._post( - f"/v2/genai/auth/agents/{path_agent_uuid}/token", - body=maybe_transform({"body_agent_uuid": body_agent_uuid}, token_create_params.TokenCreateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=TokenCreateResponse, - ) - - -class AsyncTokenResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncTokenResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncTokenResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncTokenResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncTokenResourceWithStreamingResponse(self) - - async def create( - self, - path_agent_uuid: str, - *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> TokenCreateResponse: - """ - To issue an agent token, send a POST request to - `/v2/gen-ai/auth/agents/{agent_uuid}/token`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - return await self._post( - f"/v2/genai/auth/agents/{path_agent_uuid}/token", - body=await async_maybe_transform( - {"body_agent_uuid": body_agent_uuid}, token_create_params.TokenCreateParams - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=TokenCreateResponse, - ) - - -class TokenResourceWithRawResponse: - def __init__(self, token: TokenResource) -> None: - self._token = token - - self.create = to_raw_response_wrapper( - token.create, - ) - - -class AsyncTokenResourceWithRawResponse: - def __init__(self, token: AsyncTokenResource) -> None: - self._token = token - - self.create = async_to_raw_response_wrapper( - token.create, - ) - - -class TokenResourceWithStreamingResponse: - def __init__(self, 
token: TokenResource) -> None: - self._token = token - - self.create = to_streamed_response_wrapper( - token.create, - ) - - -class AsyncTokenResourceWithStreamingResponse: - def __init__(self, token: AsyncTokenResource) -> None: - self._token = token - - self.create = async_to_streamed_response_wrapper( - token.create, - ) diff --git a/src/digitalocean_genai_sdk/resources/auth/auth.py b/src/digitalocean_genai_sdk/resources/auth/auth.py deleted file mode 100644 index 985fc56c..00000000 --- a/src/digitalocean_genai_sdk/resources/auth/auth.py +++ /dev/null @@ -1,102 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from .agents.agents import ( - AgentsResource, - AsyncAgentsResource, - AgentsResourceWithRawResponse, - AsyncAgentsResourceWithRawResponse, - AgentsResourceWithStreamingResponse, - AsyncAgentsResourceWithStreamingResponse, -) - -__all__ = ["AuthResource", "AsyncAuthResource"] - - -class AuthResource(SyncAPIResource): - @cached_property - def agents(self) -> AgentsResource: - return AgentsResource(self._client) - - @cached_property - def with_raw_response(self) -> AuthResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AuthResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AuthResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AuthResourceWithStreamingResponse(self) - - -class AsyncAuthResource(AsyncAPIResource): - @cached_property - def agents(self) -> AsyncAgentsResource: - return AsyncAgentsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncAuthResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAuthResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAuthResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAuthResourceWithStreamingResponse(self) - - -class AuthResourceWithRawResponse: - def __init__(self, auth: AuthResource) -> None: - self._auth = auth - - @cached_property - def agents(self) -> AgentsResourceWithRawResponse: - return AgentsResourceWithRawResponse(self._auth.agents) - - -class AsyncAuthResourceWithRawResponse: - def __init__(self, auth: AsyncAuthResource) -> None: - self._auth = auth - - @cached_property - def agents(self) -> AsyncAgentsResourceWithRawResponse: - return AsyncAgentsResourceWithRawResponse(self._auth.agents) - - -class AuthResourceWithStreamingResponse: - def __init__(self, auth: AuthResource) -> None: - self._auth = auth - - @cached_property - def agents(self) -> AgentsResourceWithStreamingResponse: - return AgentsResourceWithStreamingResponse(self._auth.agents) - - -class AsyncAuthResourceWithStreamingResponse: - def __init__(self, auth: AsyncAuthResource) -> None: - self._auth = auth - - @cached_property - def agents(self) -> 
AsyncAgentsResourceWithStreamingResponse: - return AsyncAgentsResourceWithStreamingResponse(self._auth.agents) diff --git a/src/digitalocean_genai_sdk/resources/indexing_jobs.py b/src/digitalocean_genai_sdk/resources/indexing_jobs.py deleted file mode 100644 index 7649a7a7..00000000 --- a/src/digitalocean_genai_sdk/resources/indexing_jobs.py +++ /dev/null @@ -1,543 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List - -import httpx - -from ..types import indexing_job_list_params, indexing_job_create_params, indexing_job_update_cancel_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.indexing_job_list_response import IndexingJobListResponse -from ..types.indexing_job_create_response import IndexingJobCreateResponse -from ..types.indexing_job_retrieve_response import IndexingJobRetrieveResponse -from ..types.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse -from ..types.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse - -__all__ = ["IndexingJobsResource", "AsyncIndexingJobsResource"] - - -class IndexingJobsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> IndexingJobsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return IndexingJobsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> IndexingJobsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return IndexingJobsResourceWithStreamingResponse(self) - - def create( - self, - *, - data_source_uuids: List[str] | NotGiven = NOT_GIVEN, - knowledge_base_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> IndexingJobCreateResponse: - """ - To start an indexing job for a knowledge base, send a POST request to - `/v2/gen-ai/indexing_jobs`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/v2/genai/indexing_jobs", - body=maybe_transform( - { - "data_source_uuids": data_source_uuids, - "knowledge_base_uuid": knowledge_base_uuid, - }, - indexing_job_create_params.IndexingJobCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=IndexingJobCreateResponse, - ) - - def retrieve( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> IndexingJobRetrieveResponse: - """ - To get status of an indexing Job for a knowledge base, send a GET request to - `/v2/gen-ai/indexing_jobs/{uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return self._get( - f"/v2/genai/indexing_jobs/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=IndexingJobRetrieveResponse, - ) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> IndexingJobListResponse: - """ - To list all indexing jobs for a knowledge base, send a GET request to - `/v2/gen-ai/indexing_jobs`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/genai/indexing_jobs", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - indexing_job_list_params.IndexingJobListParams, - ), - ), - cast_to=IndexingJobListResponse, - ) - - def retrieve_data_sources( - self, - indexing_job_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> IndexingJobRetrieveDataSourcesResponse: - """ - To list all datasources for an indexing job, send a GET request to - `/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not indexing_job_uuid: - raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}") - return self._get( - f"/v2/genai/indexing_jobs/{indexing_job_uuid}/data_sources", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=IndexingJobRetrieveDataSourcesResponse, - ) - - def update_cancel( - self, - path_uuid: str, - *, - body_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> IndexingJobUpdateCancelResponse: - """ - To cancel an indexing job for a knowledge base, send a PUT request to - `/v2/gen-ai/indexing_jobs/{uuid}/cancel`. - - Args: - body_uuid: A unique identifier for an indexing job. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_uuid: - raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") - return self._put( - f"/v2/genai/indexing_jobs/{path_uuid}/cancel", - body=maybe_transform( - {"body_uuid": body_uuid}, indexing_job_update_cancel_params.IndexingJobUpdateCancelParams - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=IndexingJobUpdateCancelResponse, - ) - - -class AsyncIndexingJobsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncIndexingJobsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncIndexingJobsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncIndexingJobsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncIndexingJobsResourceWithStreamingResponse(self) - - async def create( - self, - *, - data_source_uuids: List[str] | NotGiven = NOT_GIVEN, - knowledge_base_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> IndexingJobCreateResponse: - """ - To start an indexing job for a knowledge base, send a POST request to - `/v2/gen-ai/indexing_jobs`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/v2/genai/indexing_jobs", - body=await async_maybe_transform( - { - "data_source_uuids": data_source_uuids, - "knowledge_base_uuid": knowledge_base_uuid, - }, - indexing_job_create_params.IndexingJobCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=IndexingJobCreateResponse, - ) - - async def retrieve( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> IndexingJobRetrieveResponse: - """ - To get status of an indexing Job for a knowledge base, send a GET request to - `/v2/gen-ai/indexing_jobs/{uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return await self._get( - f"/v2/genai/indexing_jobs/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=IndexingJobRetrieveResponse, - ) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> IndexingJobListResponse: - """ - To list all indexing jobs for a knowledge base, send a GET request to - `/v2/gen-ai/indexing_jobs`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/genai/indexing_jobs", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - indexing_job_list_params.IndexingJobListParams, - ), - ), - cast_to=IndexingJobListResponse, - ) - - async def retrieve_data_sources( - self, - indexing_job_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> IndexingJobRetrieveDataSourcesResponse: - """ - To list all datasources for an indexing job, send a GET request to - `/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not indexing_job_uuid: - raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}") - return await self._get( - f"/v2/genai/indexing_jobs/{indexing_job_uuid}/data_sources", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=IndexingJobRetrieveDataSourcesResponse, - ) - - async def update_cancel( - self, - path_uuid: str, - *, - body_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> IndexingJobUpdateCancelResponse: - """ - To cancel an indexing job for a knowledge base, send a PUT request to - `/v2/gen-ai/indexing_jobs/{uuid}/cancel`. - - Args: - body_uuid: A unique identifier for an indexing job. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_uuid: - raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") - return await self._put( - f"/v2/genai/indexing_jobs/{path_uuid}/cancel", - body=await async_maybe_transform( - {"body_uuid": body_uuid}, indexing_job_update_cancel_params.IndexingJobUpdateCancelParams - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=IndexingJobUpdateCancelResponse, - ) - - -class IndexingJobsResourceWithRawResponse: - def __init__(self, indexing_jobs: IndexingJobsResource) -> None: - self._indexing_jobs = indexing_jobs - - self.create = to_raw_response_wrapper( - indexing_jobs.create, - ) - self.retrieve = to_raw_response_wrapper( - indexing_jobs.retrieve, - ) - self.list = to_raw_response_wrapper( - indexing_jobs.list, - ) - self.retrieve_data_sources = to_raw_response_wrapper( - indexing_jobs.retrieve_data_sources, - ) - self.update_cancel = to_raw_response_wrapper( - indexing_jobs.update_cancel, - ) - - -class AsyncIndexingJobsResourceWithRawResponse: - def __init__(self, indexing_jobs: AsyncIndexingJobsResource) -> None: - self._indexing_jobs = indexing_jobs - - self.create = async_to_raw_response_wrapper( - indexing_jobs.create, - ) - self.retrieve = async_to_raw_response_wrapper( - indexing_jobs.retrieve, - ) - self.list = async_to_raw_response_wrapper( - indexing_jobs.list, - ) - self.retrieve_data_sources = async_to_raw_response_wrapper( - indexing_jobs.retrieve_data_sources, - ) - self.update_cancel = async_to_raw_response_wrapper( - indexing_jobs.update_cancel, - ) - - -class IndexingJobsResourceWithStreamingResponse: - def __init__(self, indexing_jobs: IndexingJobsResource) -> 
None: - self._indexing_jobs = indexing_jobs - - self.create = to_streamed_response_wrapper( - indexing_jobs.create, - ) - self.retrieve = to_streamed_response_wrapper( - indexing_jobs.retrieve, - ) - self.list = to_streamed_response_wrapper( - indexing_jobs.list, - ) - self.retrieve_data_sources = to_streamed_response_wrapper( - indexing_jobs.retrieve_data_sources, - ) - self.update_cancel = to_streamed_response_wrapper( - indexing_jobs.update_cancel, - ) - - -class AsyncIndexingJobsResourceWithStreamingResponse: - def __init__(self, indexing_jobs: AsyncIndexingJobsResource) -> None: - self._indexing_jobs = indexing_jobs - - self.create = async_to_streamed_response_wrapper( - indexing_jobs.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - indexing_jobs.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - indexing_jobs.list, - ) - self.retrieve_data_sources = async_to_streamed_response_wrapper( - indexing_jobs.retrieve_data_sources, - ) - self.update_cancel = async_to_streamed_response_wrapper( - indexing_jobs.update_cancel, - ) diff --git a/src/digitalocean_genai_sdk/resources/knowledge_bases/__init__.py b/src/digitalocean_genai_sdk/resources/knowledge_bases/__init__.py deleted file mode 100644 index 03d143e2..00000000 --- a/src/digitalocean_genai_sdk/resources/knowledge_bases/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .data_sources import ( - DataSourcesResource, - AsyncDataSourcesResource, - DataSourcesResourceWithRawResponse, - AsyncDataSourcesResourceWithRawResponse, - DataSourcesResourceWithStreamingResponse, - AsyncDataSourcesResourceWithStreamingResponse, -) -from .knowledge_bases import ( - KnowledgeBasesResource, - AsyncKnowledgeBasesResource, - KnowledgeBasesResourceWithRawResponse, - AsyncKnowledgeBasesResourceWithRawResponse, - KnowledgeBasesResourceWithStreamingResponse, - AsyncKnowledgeBasesResourceWithStreamingResponse, -) - -__all__ = [ - "DataSourcesResource", - "AsyncDataSourcesResource", - "DataSourcesResourceWithRawResponse", - "AsyncDataSourcesResourceWithRawResponse", - "DataSourcesResourceWithStreamingResponse", - "AsyncDataSourcesResourceWithStreamingResponse", - "KnowledgeBasesResource", - "AsyncKnowledgeBasesResource", - "KnowledgeBasesResourceWithRawResponse", - "AsyncKnowledgeBasesResourceWithRawResponse", - "KnowledgeBasesResourceWithStreamingResponse", - "AsyncKnowledgeBasesResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/knowledge_bases/data_sources.py b/src/digitalocean_genai_sdk/resources/knowledge_bases/data_sources.py deleted file mode 100644 index b8a29c4a..00000000 --- a/src/digitalocean_genai_sdk/resources/knowledge_bases/data_sources.py +++ /dev/null @@ -1,410 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.knowledge_bases import ( - data_source_list_params, - data_source_create_params, -) -from ...types.knowledge_bases.data_source_list_response import DataSourceListResponse -from ...types.knowledge_bases.data_source_create_response import DataSourceCreateResponse -from ...types.knowledge_bases.data_source_delete_response import DataSourceDeleteResponse -from ...types.knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam -from ...types.knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam - -__all__ = ["DataSourcesResource", "AsyncDataSourcesResource"] - - -class DataSourcesResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> DataSourcesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return DataSourcesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> DataSourcesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return DataSourcesResourceWithStreamingResponse(self) - - def create( - self, - path_knowledge_base_uuid: str, - *, - aws_data_source: data_source_create_params.AwsDataSource | NotGiven = NOT_GIVEN, - body_knowledge_base_uuid: str | NotGiven = NOT_GIVEN, - spaces_data_source: APISpacesDataSourceParam | NotGiven = NOT_GIVEN, - web_crawler_data_source: APIWebCrawlerDataSourceParam | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DataSourceCreateResponse: - """ - To add a data source to a knowledge base, send a POST request to - `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}" - ) - return self._post( - f"/v2/genai/knowledge_bases/{path_knowledge_base_uuid}/data_sources", - body=maybe_transform( - { - "aws_data_source": aws_data_source, - "body_knowledge_base_uuid": body_knowledge_base_uuid, - "spaces_data_source": spaces_data_source, - "web_crawler_data_source": web_crawler_data_source, - }, - data_source_create_params.DataSourceCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DataSourceCreateResponse, - ) - - def list( - self, - knowledge_base_uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DataSourceListResponse: - """ - To list all data sources for a knowledge base, send a GET request to - `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" - ) - return self._get( - f"/v2/genai/knowledge_bases/{knowledge_base_uuid}/data_sources", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - data_source_list_params.DataSourceListParams, - ), - ), - cast_to=DataSourceListResponse, - ) - - def delete( - self, - data_source_uuid: str, - *, - knowledge_base_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DataSourceDeleteResponse: - """ - To delete a data source from a knowledge base, send a DELETE request to - `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" - ) - if not data_source_uuid: - raise ValueError(f"Expected a non-empty value for `data_source_uuid` but received {data_source_uuid!r}") - return self._delete( - f"/v2/genai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DataSourceDeleteResponse, - ) - - -class AsyncDataSourcesResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncDataSourcesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncDataSourcesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncDataSourcesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncDataSourcesResourceWithStreamingResponse(self) - - async def create( - self, - path_knowledge_base_uuid: str, - *, - aws_data_source: data_source_create_params.AwsDataSource | NotGiven = NOT_GIVEN, - body_knowledge_base_uuid: str | NotGiven = NOT_GIVEN, - spaces_data_source: APISpacesDataSourceParam | NotGiven = NOT_GIVEN, - web_crawler_data_source: APIWebCrawlerDataSourceParam | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DataSourceCreateResponse: - """ - To add a data source to a knowledge base, send a POST request to - `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}" - ) - return await self._post( - f"/v2/genai/knowledge_bases/{path_knowledge_base_uuid}/data_sources", - body=await async_maybe_transform( - { - "aws_data_source": aws_data_source, - "body_knowledge_base_uuid": body_knowledge_base_uuid, - "spaces_data_source": spaces_data_source, - "web_crawler_data_source": web_crawler_data_source, - }, - data_source_create_params.DataSourceCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DataSourceCreateResponse, - ) - - async def list( - self, - knowledge_base_uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DataSourceListResponse: - """ - To list all data sources for a knowledge base, send a GET request to - `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" - ) - return await self._get( - f"/v2/genai/knowledge_bases/{knowledge_base_uuid}/data_sources", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - data_source_list_params.DataSourceListParams, - ), - ), - cast_to=DataSourceListResponse, - ) - - async def delete( - self, - data_source_uuid: str, - *, - knowledge_base_uuid: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DataSourceDeleteResponse: - """ - To delete a data source from a knowledge base, send a DELETE request to - `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not knowledge_base_uuid: - raise ValueError( - f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" - ) - if not data_source_uuid: - raise ValueError(f"Expected a non-empty value for `data_source_uuid` but received {data_source_uuid!r}") - return await self._delete( - f"/v2/genai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DataSourceDeleteResponse, - ) - - -class DataSourcesResourceWithRawResponse: - def __init__(self, data_sources: DataSourcesResource) -> None: - self._data_sources = data_sources - - self.create = to_raw_response_wrapper( - data_sources.create, - ) - self.list = to_raw_response_wrapper( - data_sources.list, - ) - self.delete = to_raw_response_wrapper( - data_sources.delete, - ) - - -class AsyncDataSourcesResourceWithRawResponse: - def __init__(self, data_sources: AsyncDataSourcesResource) -> None: - self._data_sources = data_sources - - self.create = async_to_raw_response_wrapper( - data_sources.create, - ) - self.list = async_to_raw_response_wrapper( - data_sources.list, - ) - self.delete = async_to_raw_response_wrapper( - data_sources.delete, - ) - - -class DataSourcesResourceWithStreamingResponse: - def __init__(self, data_sources: DataSourcesResource) -> None: - self._data_sources = data_sources - - self.create = to_streamed_response_wrapper( - data_sources.create, - ) - self.list = to_streamed_response_wrapper( - data_sources.list, - ) - self.delete = to_streamed_response_wrapper( - data_sources.delete, - ) - - -class AsyncDataSourcesResourceWithStreamingResponse: - def 
__init__(self, data_sources: AsyncDataSourcesResource) -> None: - self._data_sources = data_sources - - self.create = async_to_streamed_response_wrapper( - data_sources.create, - ) - self.list = async_to_streamed_response_wrapper( - data_sources.list, - ) - self.delete = async_to_streamed_response_wrapper( - data_sources.delete, - ) diff --git a/src/digitalocean_genai_sdk/resources/knowledge_bases/knowledge_bases.py b/src/digitalocean_genai_sdk/resources/knowledge_bases/knowledge_bases.py deleted file mode 100644 index 713aca63..00000000 --- a/src/digitalocean_genai_sdk/resources/knowledge_bases/knowledge_bases.py +++ /dev/null @@ -1,667 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Iterable - -import httpx - -from ...types import knowledge_base_list_params, knowledge_base_create_params, knowledge_base_update_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .data_sources import ( - DataSourcesResource, - AsyncDataSourcesResource, - DataSourcesResourceWithRawResponse, - AsyncDataSourcesResourceWithRawResponse, - DataSourcesResourceWithStreamingResponse, - AsyncDataSourcesResourceWithStreamingResponse, -) -from ..._base_client import make_request_options -from ...types.knowledge_base_list_response import KnowledgeBaseListResponse -from ...types.knowledge_base_create_response import KnowledgeBaseCreateResponse -from ...types.knowledge_base_delete_response import KnowledgeBaseDeleteResponse -from ...types.knowledge_base_update_response import KnowledgeBaseUpdateResponse -from 
...types.knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse - -__all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"] - - -class KnowledgeBasesResource(SyncAPIResource): - @cached_property - def data_sources(self) -> DataSourcesResource: - return DataSourcesResource(self._client) - - @cached_property - def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return KnowledgeBasesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return KnowledgeBasesResourceWithStreamingResponse(self) - - def create( - self, - *, - database_id: str | NotGiven = NOT_GIVEN, - datasources: Iterable[knowledge_base_create_params.Datasource] | NotGiven = NOT_GIVEN, - embedding_model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseCreateResponse: - """ - To create a knowledge base, send a POST request to `/v2/gen-ai/knowledge_bases`. - - Args: - database_id: Identifier of the DigitalOcean OpenSearch database this knowledge base will use, - optional. If not provided, we create a new database for the knowledge base in - the same region as the knowledge base. - - datasources: The data sources to use for this knowledge base. See - [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets) - for more information on data sources best practices. - - embedding_model_uuid: Identifier for the - [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models). - - name: Name of the knowledge base. - - project_id: Identifier of the DigitalOcean project this knowledge base will belong to. - - region: The datacenter region to deploy the knowledge base in. - - tags: Tags to organize your knowledge base. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/v2/genai/knowledge_bases", - body=maybe_transform( - { - "database_id": database_id, - "datasources": datasources, - "embedding_model_uuid": embedding_model_uuid, - "name": name, - "project_id": project_id, - "region": region, - "tags": tags, - "vpc_uuid": vpc_uuid, - }, - knowledge_base_create_params.KnowledgeBaseCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseCreateResponse, - ) - - def retrieve( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseRetrieveResponse: - """ - To retrive information about an existing knowledge base, send a GET request to - `/v2/gen-ai/knowledge_bases/{uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return self._get( - f"/v2/genai/knowledge_bases/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseRetrieveResponse, - ) - - def update( - self, - path_uuid: str, - *, - database_id: str | NotGiven = NOT_GIVEN, - embedding_model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, - body_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseUpdateResponse: - """ - To update a knowledge base, send a PUT request to - `/v2/gen-ai/knowledge_bases/{uuid}`. - - Args: - database_id: the id of the DigitalOcean database this knowledge base will use, optiona. - - embedding_model_uuid: Identifier for the foundation model. - - tags: Tags to organize your knowledge base. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_uuid: - raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") - return self._put( - f"/v2/genai/knowledge_bases/{path_uuid}", - body=maybe_transform( - { - "database_id": database_id, - "embedding_model_uuid": embedding_model_uuid, - "name": name, - "project_id": project_id, - "tags": tags, - "body_uuid": body_uuid, - }, - knowledge_base_update_params.KnowledgeBaseUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseUpdateResponse, - ) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseListResponse: - """ - To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/genai/knowledge_bases", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - knowledge_base_list_params.KnowledgeBaseListParams, - ), - ), - cast_to=KnowledgeBaseListResponse, - ) - - def delete( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseDeleteResponse: - """ - To delete a knowledge base, send a DELETE request to - `/v2/gen-ai/knowledge_bases/{uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return self._delete( - f"/v2/genai/knowledge_bases/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseDeleteResponse, - ) - - -class AsyncKnowledgeBasesResource(AsyncAPIResource): - @cached_property - def data_sources(self) -> AsyncDataSourcesResource: - return AsyncDataSourcesResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncKnowledgeBasesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncKnowledgeBasesResourceWithStreamingResponse(self) - - async def create( - self, - *, - database_id: str | NotGiven = NOT_GIVEN, - datasources: Iterable[knowledge_base_create_params.Datasource] | NotGiven = NOT_GIVEN, - embedding_model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseCreateResponse: - """ - To create a knowledge base, send a POST request to `/v2/gen-ai/knowledge_bases`. - - Args: - database_id: Identifier of the DigitalOcean OpenSearch database this knowledge base will use, - optional. If not provided, we create a new database for the knowledge base in - the same region as the knowledge base. - - datasources: The data sources to use for this knowledge base. See - [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets) - for more information on data sources best practices. - - embedding_model_uuid: Identifier for the - [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models). - - name: Name of the knowledge base. - - project_id: Identifier of the DigitalOcean project this knowledge base will belong to. - - region: The datacenter region to deploy the knowledge base in. - - tags: Tags to organize your knowledge base. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/v2/genai/knowledge_bases", - body=await async_maybe_transform( - { - "database_id": database_id, - "datasources": datasources, - "embedding_model_uuid": embedding_model_uuid, - "name": name, - "project_id": project_id, - "region": region, - "tags": tags, - "vpc_uuid": vpc_uuid, - }, - knowledge_base_create_params.KnowledgeBaseCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseCreateResponse, - ) - - async def retrieve( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseRetrieveResponse: - """ - To retrive information about an existing knowledge base, send a GET request to - `/v2/gen-ai/knowledge_bases/{uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return await self._get( - f"/v2/genai/knowledge_bases/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseRetrieveResponse, - ) - - async def update( - self, - path_uuid: str, - *, - database_id: str | NotGiven = NOT_GIVEN, - embedding_model_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - tags: List[str] | NotGiven = NOT_GIVEN, - body_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseUpdateResponse: - """ - To update a knowledge base, send a PUT request to - `/v2/gen-ai/knowledge_bases/{uuid}`. - - Args: - database_id: the id of the DigitalOcean database this knowledge base will use, optiona. - - embedding_model_uuid: Identifier for the foundation model. - - tags: Tags to organize your knowledge base. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_uuid: - raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") - return await self._put( - f"/v2/genai/knowledge_bases/{path_uuid}", - body=await async_maybe_transform( - { - "database_id": database_id, - "embedding_model_uuid": embedding_model_uuid, - "name": name, - "project_id": project_id, - "tags": tags, - "body_uuid": body_uuid, - }, - knowledge_base_update_params.KnowledgeBaseUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseUpdateResponse, - ) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseListResponse: - """ - To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/genai/knowledge_bases", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - knowledge_base_list_params.KnowledgeBaseListParams, - ), - ), - cast_to=KnowledgeBaseListResponse, - ) - - async def delete( - self, - uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KnowledgeBaseDeleteResponse: - """ - To delete a knowledge base, send a DELETE request to - `/v2/gen-ai/knowledge_bases/{uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return await self._delete( - f"/v2/genai/knowledge_bases/{uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KnowledgeBaseDeleteResponse, - ) - - -class KnowledgeBasesResourceWithRawResponse: - def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: - self._knowledge_bases = knowledge_bases - - self.create = to_raw_response_wrapper( - knowledge_bases.create, - ) - self.retrieve = to_raw_response_wrapper( - knowledge_bases.retrieve, - ) - self.update = to_raw_response_wrapper( - knowledge_bases.update, - ) - self.list = to_raw_response_wrapper( - knowledge_bases.list, - ) - self.delete = to_raw_response_wrapper( - knowledge_bases.delete, - ) - - @cached_property - def data_sources(self) -> DataSourcesResourceWithRawResponse: - return DataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources) - - -class AsyncKnowledgeBasesResourceWithRawResponse: - def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: - self._knowledge_bases = knowledge_bases - - self.create = async_to_raw_response_wrapper( - knowledge_bases.create, - ) - self.retrieve = async_to_raw_response_wrapper( - knowledge_bases.retrieve, - ) - self.update = async_to_raw_response_wrapper( - knowledge_bases.update, - ) - self.list = async_to_raw_response_wrapper( - knowledge_bases.list, - ) - self.delete = async_to_raw_response_wrapper( - knowledge_bases.delete, - ) - - @cached_property - def data_sources(self) -> AsyncDataSourcesResourceWithRawResponse: - return 
AsyncDataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources) - - -class KnowledgeBasesResourceWithStreamingResponse: - def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: - self._knowledge_bases = knowledge_bases - - self.create = to_streamed_response_wrapper( - knowledge_bases.create, - ) - self.retrieve = to_streamed_response_wrapper( - knowledge_bases.retrieve, - ) - self.update = to_streamed_response_wrapper( - knowledge_bases.update, - ) - self.list = to_streamed_response_wrapper( - knowledge_bases.list, - ) - self.delete = to_streamed_response_wrapper( - knowledge_bases.delete, - ) - - @cached_property - def data_sources(self) -> DataSourcesResourceWithStreamingResponse: - return DataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources) - - -class AsyncKnowledgeBasesResourceWithStreamingResponse: - def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: - self._knowledge_bases = knowledge_bases - - self.create = async_to_streamed_response_wrapper( - knowledge_bases.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - knowledge_bases.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - knowledge_bases.update, - ) - self.list = async_to_streamed_response_wrapper( - knowledge_bases.list, - ) - self.delete = async_to_streamed_response_wrapper( - knowledge_bases.delete, - ) - - @cached_property - def data_sources(self) -> AsyncDataSourcesResourceWithStreamingResponse: - return AsyncDataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources) diff --git a/src/digitalocean_genai_sdk/resources/providers/__init__.py b/src/digitalocean_genai_sdk/resources/providers/__init__.py deleted file mode 100644 index 1731e057..00000000 --- a/src/digitalocean_genai_sdk/resources/providers/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .openai import ( - OpenAIResource, - AsyncOpenAIResource, - OpenAIResourceWithRawResponse, - AsyncOpenAIResourceWithRawResponse, - OpenAIResourceWithStreamingResponse, - AsyncOpenAIResourceWithStreamingResponse, -) -from .anthropic import ( - AnthropicResource, - AsyncAnthropicResource, - AnthropicResourceWithRawResponse, - AsyncAnthropicResourceWithRawResponse, - AnthropicResourceWithStreamingResponse, - AsyncAnthropicResourceWithStreamingResponse, -) -from .providers import ( - ProvidersResource, - AsyncProvidersResource, - ProvidersResourceWithRawResponse, - AsyncProvidersResourceWithRawResponse, - ProvidersResourceWithStreamingResponse, - AsyncProvidersResourceWithStreamingResponse, -) - -__all__ = [ - "AnthropicResource", - "AsyncAnthropicResource", - "AnthropicResourceWithRawResponse", - "AsyncAnthropicResourceWithRawResponse", - "AnthropicResourceWithStreamingResponse", - "AsyncAnthropicResourceWithStreamingResponse", - "OpenAIResource", - "AsyncOpenAIResource", - "OpenAIResourceWithRawResponse", - "AsyncOpenAIResourceWithRawResponse", - "OpenAIResourceWithStreamingResponse", - "AsyncOpenAIResourceWithStreamingResponse", - "ProvidersResource", - "AsyncProvidersResource", - "ProvidersResourceWithRawResponse", - "AsyncProvidersResourceWithRawResponse", - "ProvidersResourceWithStreamingResponse", - "AsyncProvidersResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/providers/anthropic/__init__.py b/src/digitalocean_genai_sdk/resources/providers/anthropic/__init__.py deleted file mode 100644 index 057a3a2f..00000000 --- a/src/digitalocean_genai_sdk/resources/providers/anthropic/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from .anthropic import ( - AnthropicResource, - AsyncAnthropicResource, - AnthropicResourceWithRawResponse, - AsyncAnthropicResourceWithRawResponse, - AnthropicResourceWithStreamingResponse, - AsyncAnthropicResourceWithStreamingResponse, -) - -__all__ = [ - "KeysResource", - "AsyncKeysResource", - "KeysResourceWithRawResponse", - "AsyncKeysResourceWithRawResponse", - "KeysResourceWithStreamingResponse", - "AsyncKeysResourceWithStreamingResponse", - "AnthropicResource", - "AsyncAnthropicResource", - "AnthropicResourceWithRawResponse", - "AsyncAnthropicResourceWithRawResponse", - "AnthropicResourceWithStreamingResponse", - "AsyncAnthropicResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/providers/anthropic/anthropic.py b/src/digitalocean_genai_sdk/resources/providers/anthropic/anthropic.py deleted file mode 100644 index 64783563..00000000 --- a/src/digitalocean_genai_sdk/resources/providers/anthropic/anthropic.py +++ /dev/null @@ -1,102 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource - -__all__ = ["AnthropicResource", "AsyncAnthropicResource"] - - -class AnthropicResource(SyncAPIResource): - @cached_property - def keys(self) -> KeysResource: - return KeysResource(self._client) - - @cached_property - def with_raw_response(self) -> AnthropicResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AnthropicResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AnthropicResourceWithStreamingResponse(self) - - -class AsyncAnthropicResource(AsyncAPIResource): - @cached_property - def keys(self) -> AsyncKeysResource: - return AsyncKeysResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAnthropicResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAnthropicResourceWithStreamingResponse(self) - - -class AnthropicResourceWithRawResponse: - def __init__(self, anthropic: AnthropicResource) -> None: - self._anthropic = anthropic - - @cached_property - def keys(self) -> KeysResourceWithRawResponse: - return KeysResourceWithRawResponse(self._anthropic.keys) - - -class AsyncAnthropicResourceWithRawResponse: - def __init__(self, anthropic: AsyncAnthropicResource) -> None: - self._anthropic = anthropic - - @cached_property - def keys(self) -> AsyncKeysResourceWithRawResponse: - return AsyncKeysResourceWithRawResponse(self._anthropic.keys) - - -class AnthropicResourceWithStreamingResponse: - def __init__(self, anthropic: AnthropicResource) -> None: - self._anthropic = anthropic - - @cached_property - def keys(self) -> KeysResourceWithStreamingResponse: - return KeysResourceWithStreamingResponse(self._anthropic.keys) - - -class AsyncAnthropicResourceWithStreamingResponse: - def __init__(self, anthropic: AsyncAnthropicResource) -> None: - self._anthropic = anthropic - - @cached_property - def keys(self) -> AsyncKeysResourceWithStreamingResponse: - return AsyncKeysResourceWithStreamingResponse(self._anthropic.keys) diff --git a/src/digitalocean_genai_sdk/resources/providers/anthropic/keys.py b/src/digitalocean_genai_sdk/resources/providers/anthropic/keys.py deleted file mode 100644 index 1f65a5ab..00000000 --- a/src/digitalocean_genai_sdk/resources/providers/anthropic/keys.py +++ /dev/null @@ -1,662 +0,0 @@ -# File generated from our 
OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.providers.anthropic import key_list_params, key_create_params, key_update_params, key_list_agents_params -from ....types.providers.anthropic.key_list_response import KeyListResponse -from ....types.providers.anthropic.key_create_response import KeyCreateResponse -from ....types.providers.anthropic.key_delete_response import KeyDeleteResponse -from ....types.providers.anthropic.key_update_response import KeyUpdateResponse -from ....types.providers.anthropic.key_retrieve_response import KeyRetrieveResponse -from ....types.providers.anthropic.key_list_agents_response import KeyListAgentsResponse - -__all__ = ["KeysResource", "AsyncKeysResource"] - - -class KeysResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> KeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return KeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> KeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return KeysResourceWithStreamingResponse(self) - - def create( - self, - *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: - """ - To create an Anthropic API key, send a POST request to - `/v2/gen-ai/anthropic/keys`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/v2/genai/anthropic/keys", - body=maybe_transform( - { - "api_key": api_key, - "name": name, - }, - key_create_params.KeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyCreateResponse, - ) - - def retrieve( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: - """ - To retrieve details of an Anthropic API key, send a GET request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._get( - f"/v2/genai/anthropic/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyRetrieveResponse, - ) - - def update( - self, - path_api_key_uuid: str, - *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: - """ - To update an Anthropic API key, send a PUT request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return self._put( - f"/v2/genai/anthropic/keys/{path_api_key_uuid}", - body=maybe_transform( - { - "api_key": api_key, - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - key_update_params.KeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyUpdateResponse, - ) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: - """ - To list all Anthropic API keys, send a GET request to - `/v2/gen-ai/anthropic/keys`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/genai/anthropic/keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_params.KeyListParams, - ), - ), - cast_to=KeyListResponse, - ) - - def delete( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyDeleteResponse: - """ - To delete an Anthropic API key, send a DELETE request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._delete( - f"/v2/genai/anthropic/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyDeleteResponse, - ) - - def list_agents( - self, - uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListAgentsResponse: - """ - List Agents by Anthropic Key. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return self._get( - f"/v2/genai/anthropic/keys/{uuid}/agents", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_agents_params.KeyListAgentsParams, - ), - ), - cast_to=KeyListAgentsResponse, - ) - - -class AsyncKeysResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncKeysResourceWithStreamingResponse(self) - - async def create( - self, - *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: - """ - To create an Anthropic API key, send a POST request to - `/v2/gen-ai/anthropic/keys`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/v2/genai/anthropic/keys", - body=await async_maybe_transform( - { - "api_key": api_key, - "name": name, - }, - key_create_params.KeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyCreateResponse, - ) - - async def retrieve( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: - """ - To retrieve details of an Anthropic API key, send a GET request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._get( - f"/v2/genai/anthropic/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyRetrieveResponse, - ) - - async def update( - self, - path_api_key_uuid: str, - *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: - """ - To update an Anthropic API key, send a PUT request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return await self._put( - f"/v2/genai/anthropic/keys/{path_api_key_uuid}", - body=await async_maybe_transform( - { - "api_key": api_key, - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - key_update_params.KeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyUpdateResponse, - ) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: - """ - To list all Anthropic API keys, send a GET request to - `/v2/gen-ai/anthropic/keys`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/genai/anthropic/keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_params.KeyListParams, - ), - ), - cast_to=KeyListResponse, - ) - - async def delete( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyDeleteResponse: - """ - To delete an Anthropic API key, send a DELETE request to - `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._delete( - f"/v2/genai/anthropic/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyDeleteResponse, - ) - - async def list_agents( - self, - uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListAgentsResponse: - """ - List Agents by Anthropic Key. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return await self._get( - f"/v2/genai/anthropic/keys/{uuid}/agents", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_agents_params.KeyListAgentsParams, - ), - ), - cast_to=KeyListAgentsResponse, - ) - - -class KeysResourceWithRawResponse: - def __init__(self, keys: KeysResource) -> None: - self._keys = keys - - self.create = to_raw_response_wrapper( - keys.create, - ) - self.retrieve = to_raw_response_wrapper( - keys.retrieve, - ) - self.update = to_raw_response_wrapper( - keys.update, - ) - self.list = to_raw_response_wrapper( - keys.list, - ) - self.delete = to_raw_response_wrapper( - keys.delete, - ) - self.list_agents = to_raw_response_wrapper( - keys.list_agents, - ) - - -class AsyncKeysResourceWithRawResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys - - self.create = async_to_raw_response_wrapper( - keys.create, - ) - self.retrieve = async_to_raw_response_wrapper( - keys.retrieve, - ) - self.update = async_to_raw_response_wrapper( - keys.update, - ) - self.list = async_to_raw_response_wrapper( - keys.list, - ) - self.delete = async_to_raw_response_wrapper( - keys.delete, - ) - self.list_agents = async_to_raw_response_wrapper( - keys.list_agents, - ) - - -class KeysResourceWithStreamingResponse: - def __init__(self, keys: KeysResource) -> None: - self._keys = keys - - self.create = to_streamed_response_wrapper( - keys.create, - ) - self.retrieve = to_streamed_response_wrapper( - keys.retrieve, - ) - 
self.update = to_streamed_response_wrapper( - keys.update, - ) - self.list = to_streamed_response_wrapper( - keys.list, - ) - self.delete = to_streamed_response_wrapper( - keys.delete, - ) - self.list_agents = to_streamed_response_wrapper( - keys.list_agents, - ) - - -class AsyncKeysResourceWithStreamingResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys - - self.create = async_to_streamed_response_wrapper( - keys.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - keys.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - keys.update, - ) - self.list = async_to_streamed_response_wrapper( - keys.list, - ) - self.delete = async_to_streamed_response_wrapper( - keys.delete, - ) - self.list_agents = async_to_streamed_response_wrapper( - keys.list_agents, - ) diff --git a/src/digitalocean_genai_sdk/resources/providers/openai/__init__.py b/src/digitalocean_genai_sdk/resources/providers/openai/__init__.py deleted file mode 100644 index 66d8ca7a..00000000 --- a/src/digitalocean_genai_sdk/resources/providers/openai/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from .openai import ( - OpenAIResource, - AsyncOpenAIResource, - OpenAIResourceWithRawResponse, - AsyncOpenAIResourceWithRawResponse, - OpenAIResourceWithStreamingResponse, - AsyncOpenAIResourceWithStreamingResponse, -) - -__all__ = [ - "KeysResource", - "AsyncKeysResource", - "KeysResourceWithRawResponse", - "AsyncKeysResourceWithRawResponse", - "KeysResourceWithStreamingResponse", - "AsyncKeysResourceWithStreamingResponse", - "OpenAIResource", - "AsyncOpenAIResource", - "OpenAIResourceWithRawResponse", - "AsyncOpenAIResourceWithRawResponse", - "OpenAIResourceWithStreamingResponse", - "AsyncOpenAIResourceWithStreamingResponse", -] diff --git a/src/digitalocean_genai_sdk/resources/providers/openai/keys.py b/src/digitalocean_genai_sdk/resources/providers/openai/keys.py deleted file mode 100644 index 06e7a23c..00000000 --- a/src/digitalocean_genai_sdk/resources/providers/openai/keys.py +++ /dev/null @@ -1,658 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.providers.openai import key_list_params, key_create_params, key_update_params, key_retrieve_agents_params -from ....types.providers.openai.key_list_response import KeyListResponse -from ....types.providers.openai.key_create_response import KeyCreateResponse -from ....types.providers.openai.key_delete_response import KeyDeleteResponse -from ....types.providers.openai.key_update_response import KeyUpdateResponse -from ....types.providers.openai.key_retrieve_response import KeyRetrieveResponse -from ....types.providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse - -__all__ = ["KeysResource", "AsyncKeysResource"] - - -class KeysResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> KeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return KeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> KeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return KeysResourceWithStreamingResponse(self) - - def create( - self, - *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: - """ - To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/v2/genai/openai/keys", - body=maybe_transform( - { - "api_key": api_key, - "name": name, - }, - key_create_params.KeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyCreateResponse, - ) - - def retrieve( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: - """ - To retrieve details of an OpenAI API key, send a GET request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._get( - f"/v2/genai/openai/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyRetrieveResponse, - ) - - def update( - self, - path_api_key_uuid: str, - *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: - """ - To update an OpenAI API key, send a PUT request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return self._put( - f"/v2/genai/openai/keys/{path_api_key_uuid}", - body=maybe_transform( - { - "api_key": api_key, - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - key_update_params.KeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyUpdateResponse, - ) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: - """ - To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/genai/openai/keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_params.KeyListParams, - ), - ), - cast_to=KeyListResponse, - ) - - def delete( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyDeleteResponse: - """ - To delete an OpenAI API key, send a DELETE request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return self._delete( - f"/v2/genai/openai/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyDeleteResponse, - ) - - def retrieve_agents( - self, - uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveAgentsResponse: - """ - List Agents by OpenAI Key. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return self._get( - f"/v2/genai/openai/keys/{uuid}/agents", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_retrieve_agents_params.KeyRetrieveAgentsParams, - ), - ), - cast_to=KeyRetrieveAgentsResponse, - ) - - -class AsyncKeysResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncKeysResourceWithStreamingResponse(self) - - async def create( - self, - *, - api_key: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: - """ - To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/v2/genai/openai/keys", - body=await async_maybe_transform( - { - "api_key": api_key, - "name": name, - }, - key_create_params.KeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyCreateResponse, - ) - - async def retrieve( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: - """ - To retrieve details of an OpenAI API key, send a GET request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._get( - f"/v2/genai/openai/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyRetrieveResponse, - ) - - async def update( - self, - path_api_key_uuid: str, - *, - api_key: str | NotGiven = NOT_GIVEN, - body_api_key_uuid: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: - """ - To update an OpenAI API key, send a PUT request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_api_key_uuid: - raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") - return await self._put( - f"/v2/genai/openai/keys/{path_api_key_uuid}", - body=await async_maybe_transform( - { - "api_key": api_key, - "body_api_key_uuid": body_api_key_uuid, - "name": name, - }, - key_update_params.KeyUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyUpdateResponse, - ) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: - """ - To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/genai/openai/keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_params.KeyListParams, - ), - ), - cast_to=KeyListResponse, - ) - - async def delete( - self, - api_key_uuid: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyDeleteResponse: - """ - To delete an OpenAI API key, send a DELETE request to - `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not api_key_uuid: - raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") - return await self._delete( - f"/v2/genai/openai/keys/{api_key_uuid}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyDeleteResponse, - ) - - async def retrieve_agents( - self, - uuid: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveAgentsResponse: - """ - List Agents by OpenAI Key. - - Args: - page: page number. - - per_page: items per page. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not uuid: - raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") - return await self._get( - f"/v2/genai/openai/keys/{uuid}/agents", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_retrieve_agents_params.KeyRetrieveAgentsParams, - ), - ), - cast_to=KeyRetrieveAgentsResponse, - ) - - -class KeysResourceWithRawResponse: - def __init__(self, keys: KeysResource) -> None: - self._keys = keys - - self.create = to_raw_response_wrapper( - keys.create, - ) - self.retrieve = to_raw_response_wrapper( - keys.retrieve, - ) - self.update = to_raw_response_wrapper( - keys.update, - ) - self.list = to_raw_response_wrapper( - keys.list, - ) - self.delete = to_raw_response_wrapper( - keys.delete, - ) - self.retrieve_agents = to_raw_response_wrapper( - keys.retrieve_agents, - ) - - -class AsyncKeysResourceWithRawResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys - - self.create = async_to_raw_response_wrapper( - keys.create, - ) - self.retrieve = async_to_raw_response_wrapper( - keys.retrieve, - ) - self.update = async_to_raw_response_wrapper( - keys.update, - ) - self.list = async_to_raw_response_wrapper( - keys.list, - ) - self.delete = async_to_raw_response_wrapper( - keys.delete, - ) - self.retrieve_agents = async_to_raw_response_wrapper( - keys.retrieve_agents, - ) - - -class KeysResourceWithStreamingResponse: - def __init__(self, keys: KeysResource) -> None: - self._keys = keys - - self.create = to_streamed_response_wrapper( - keys.create, - ) - self.retrieve = 
to_streamed_response_wrapper( - keys.retrieve, - ) - self.update = to_streamed_response_wrapper( - keys.update, - ) - self.list = to_streamed_response_wrapper( - keys.list, - ) - self.delete = to_streamed_response_wrapper( - keys.delete, - ) - self.retrieve_agents = to_streamed_response_wrapper( - keys.retrieve_agents, - ) - - -class AsyncKeysResourceWithStreamingResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys - - self.create = async_to_streamed_response_wrapper( - keys.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - keys.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - keys.update, - ) - self.list = async_to_streamed_response_wrapper( - keys.list, - ) - self.delete = async_to_streamed_response_wrapper( - keys.delete, - ) - self.retrieve_agents = async_to_streamed_response_wrapper( - keys.retrieve_agents, - ) diff --git a/src/digitalocean_genai_sdk/resources/providers/openai/openai.py b/src/digitalocean_genai_sdk/resources/providers/openai/openai.py deleted file mode 100644 index d29fd062..00000000 --- a/src/digitalocean_genai_sdk/resources/providers/openai/openai.py +++ /dev/null @@ -1,102 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource - -__all__ = ["OpenAIResource", "AsyncOpenAIResource"] - - -class OpenAIResource(SyncAPIResource): - @cached_property - def keys(self) -> KeysResource: - return KeysResource(self._client) - - @cached_property - def with_raw_response(self) -> OpenAIResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return OpenAIResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return OpenAIResourceWithStreamingResponse(self) - - -class AsyncOpenAIResource(AsyncAPIResource): - @cached_property - def keys(self) -> AsyncKeysResource: - return AsyncKeysResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncOpenAIResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncOpenAIResourceWithStreamingResponse(self) - - -class OpenAIResourceWithRawResponse: - def __init__(self, openai: OpenAIResource) -> None: - self._openai = openai - - @cached_property - def keys(self) -> KeysResourceWithRawResponse: - return KeysResourceWithRawResponse(self._openai.keys) - - -class AsyncOpenAIResourceWithRawResponse: - def __init__(self, openai: AsyncOpenAIResource) -> None: - self._openai = openai - - @cached_property - def keys(self) -> AsyncKeysResourceWithRawResponse: - return AsyncKeysResourceWithRawResponse(self._openai.keys) - - -class OpenAIResourceWithStreamingResponse: - def __init__(self, openai: OpenAIResource) -> None: - self._openai = openai - - @cached_property - def keys(self) -> KeysResourceWithStreamingResponse: - return KeysResourceWithStreamingResponse(self._openai.keys) - - -class AsyncOpenAIResourceWithStreamingResponse: - def __init__(self, openai: AsyncOpenAIResource) -> None: - self._openai = openai - - @cached_property - def keys(self) -> AsyncKeysResourceWithStreamingResponse: - return AsyncKeysResourceWithStreamingResponse(self._openai.keys) diff --git a/src/digitalocean_genai_sdk/resources/providers/providers.py b/src/digitalocean_genai_sdk/resources/providers/providers.py deleted file mode 100644 index 50e3db1a..00000000 --- a/src/digitalocean_genai_sdk/resources/providers/providers.py +++ /dev/null @@ -1,134 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from .openai.openai import ( - OpenAIResource, - AsyncOpenAIResource, - OpenAIResourceWithRawResponse, - AsyncOpenAIResourceWithRawResponse, - OpenAIResourceWithStreamingResponse, - AsyncOpenAIResourceWithStreamingResponse, -) -from .anthropic.anthropic import ( - AnthropicResource, - AsyncAnthropicResource, - AnthropicResourceWithRawResponse, - AsyncAnthropicResourceWithRawResponse, - AnthropicResourceWithStreamingResponse, - AsyncAnthropicResourceWithStreamingResponse, -) - -__all__ = ["ProvidersResource", "AsyncProvidersResource"] - - -class ProvidersResource(SyncAPIResource): - @cached_property - def anthropic(self) -> AnthropicResource: - return AnthropicResource(self._client) - - @cached_property - def openai(self) -> OpenAIResource: - return OpenAIResource(self._client) - - @cached_property - def with_raw_response(self) -> ProvidersResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return ProvidersResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return ProvidersResourceWithStreamingResponse(self) - - -class AsyncProvidersResource(AsyncAPIResource): - @cached_property - def anthropic(self) -> AsyncAnthropicResource: - return AsyncAnthropicResource(self._client) - - @cached_property - def openai(self) -> AsyncOpenAIResource: - return AsyncOpenAIResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncProvidersResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncProvidersResourceWithStreamingResponse(self) - - -class ProvidersResourceWithRawResponse: - def __init__(self, providers: ProvidersResource) -> None: - self._providers = providers - - @cached_property - def anthropic(self) -> AnthropicResourceWithRawResponse: - return AnthropicResourceWithRawResponse(self._providers.anthropic) - - @cached_property - def openai(self) -> OpenAIResourceWithRawResponse: - return OpenAIResourceWithRawResponse(self._providers.openai) - - -class AsyncProvidersResourceWithRawResponse: - def __init__(self, providers: AsyncProvidersResource) -> None: - self._providers = providers - - @cached_property - def anthropic(self) -> AsyncAnthropicResourceWithRawResponse: - return AsyncAnthropicResourceWithRawResponse(self._providers.anthropic) - - @cached_property - def openai(self) -> AsyncOpenAIResourceWithRawResponse: - return AsyncOpenAIResourceWithRawResponse(self._providers.openai) - - -class ProvidersResourceWithStreamingResponse: - def __init__(self, providers: ProvidersResource) -> None: - self._providers = providers - - @cached_property - def anthropic(self) -> AnthropicResourceWithStreamingResponse: - return AnthropicResourceWithStreamingResponse(self._providers.anthropic) - - @cached_property - def openai(self) -> OpenAIResourceWithStreamingResponse: - return OpenAIResourceWithStreamingResponse(self._providers.openai) - - -class AsyncProvidersResourceWithStreamingResponse: - def __init__(self, providers: AsyncProvidersResource) -> None: - self._providers = providers - - @cached_property - def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse: - return AsyncAnthropicResourceWithStreamingResponse(self._providers.anthropic) - - @cached_property - def openai(self) -> AsyncOpenAIResourceWithStreamingResponse: - return AsyncOpenAIResourceWithStreamingResponse(self._providers.openai) diff --git 
a/src/digitalocean_genai_sdk/resources/regions.py b/src/digitalocean_genai_sdk/resources/regions.py deleted file mode 100644 index d506688b..00000000 --- a/src/digitalocean_genai_sdk/resources/regions.py +++ /dev/null @@ -1,191 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ..types import region_list_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.region_list_response import RegionListResponse - -__all__ = ["RegionsResource", "AsyncRegionsResource"] - - -class RegionsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> RegionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return RegionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> RegionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return RegionsResourceWithStreamingResponse(self) - - def list( - self, - *, - serves_batch: bool | NotGiven = NOT_GIVEN, - serves_inference: bool | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RegionListResponse: - """ - To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. - - Args: - serves_batch: include datacenters that are capable of running batch jobs. - - serves_inference: include datacenters that serve inference. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/genai/regions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "serves_batch": serves_batch, - "serves_inference": serves_inference, - }, - region_list_params.RegionListParams, - ), - ), - cast_to=RegionListResponse, - ) - - -class AsyncRegionsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncRegionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncRegionsResourceWithStreamingResponse(self) - - async def list( - self, - *, - serves_batch: bool | NotGiven = NOT_GIVEN, - serves_inference: bool | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> RegionListResponse: - """ - To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. - - Args: - serves_batch: include datacenters that are capable of running batch jobs. - - serves_inference: include datacenters that serve inference. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/genai/regions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "serves_batch": serves_batch, - "serves_inference": serves_inference, - }, - region_list_params.RegionListParams, - ), - ), - cast_to=RegionListResponse, - ) - - -class RegionsResourceWithRawResponse: - def __init__(self, regions: RegionsResource) -> None: - self._regions = regions - - self.list = to_raw_response_wrapper( - regions.list, - ) - - -class AsyncRegionsResourceWithRawResponse: - def __init__(self, regions: AsyncRegionsResource) -> None: - self._regions = regions - - self.list = async_to_raw_response_wrapper( - regions.list, - ) - - -class 
RegionsResourceWithStreamingResponse: - def __init__(self, regions: RegionsResource) -> None: - self._regions = regions - - self.list = to_streamed_response_wrapper( - regions.list, - ) - - -class AsyncRegionsResourceWithStreamingResponse: - def __init__(self, regions: AsyncRegionsResource) -> None: - self._regions = regions - - self.list = async_to_streamed_response_wrapper( - regions.list, - ) diff --git a/src/digitalocean_genai_sdk/types/__init__.py b/src/digitalocean_genai_sdk/types/__init__.py deleted file mode 100644 index ee516f83..00000000 --- a/src/digitalocean_genai_sdk/types/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .model import Model as Model -from .api_agent import APIAgent as APIAgent -from .api_model import APIModel as APIModel -from .api_agreement import APIAgreement as APIAgreement -from .api_indexing_job import APIIndexingJob as APIIndexingJob -from .agent_list_params import AgentListParams as AgentListParams -from .api_model_version import APIModelVersion as APIModelVersion -from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase -from .region_list_params import RegionListParams as RegionListParams -from .agent_create_params import AgentCreateParams as AgentCreateParams -from .agent_list_response import AgentListResponse as AgentListResponse -from .agent_update_params import AgentUpdateParams as AgentUpdateParams -from .api_key_list_params import APIKeyListParams as APIKeyListParams -from .model_list_response import ModelListResponse as ModelListResponse -from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod -from .region_list_response import RegionListResponse as RegionListResponse -from .agent_create_response import AgentCreateResponse as AgentCreateResponse -from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse -from .agent_update_response import 
AgentUpdateResponse as AgentUpdateResponse -from .api_key_list_response import APIKeyListResponse as APIKeyListResponse -from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo -from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse -from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo -from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams -from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams -from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility -from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse -from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams -from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo -from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams -from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse -from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams -from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse -from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse -from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams -from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse -from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams -from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob -from .chat_create_completion_params import ChatCreateCompletionParams as ChatCreateCompletionParams -from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse -from 
.knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse -from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse -from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse -from .chat_create_completion_response import ChatCreateCompletionResponse as ChatCreateCompletionResponse -from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse -from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams -from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse -from .indexing_job_retrieve_data_sources_response import ( - IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, -) -from .chat_completion_request_message_content_part_text_param import ( - ChatCompletionRequestMessageContentPartTextParam as ChatCompletionRequestMessageContentPartTextParam, -) diff --git a/src/digitalocean_genai_sdk/types/agent_create_params.py b/src/digitalocean_genai_sdk/types/agent_create_params.py deleted file mode 100644 index 58b99df7..00000000 --- a/src/digitalocean_genai_sdk/types/agent_create_params.py +++ /dev/null @@ -1,39 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["AgentCreateParams"] - - -class AgentCreateParams(TypedDict, total=False): - anthropic_key_uuid: str - - description: str - - instruction: str - """Agent instruction. - - Instructions help your agent to perform its job effectively. See - [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) - for best practices. 
- """ - - knowledge_base_uuid: List[str] - - model_uuid: str - """Identifier for the foundation model.""" - - name: str - - openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")] - - project_id: str - - region: str - - tags: List[str] diff --git a/src/digitalocean_genai_sdk/types/agent_create_response.py b/src/digitalocean_genai_sdk/types/agent_create_response.py deleted file mode 100644 index 48545fe9..00000000 --- a/src/digitalocean_genai_sdk/types/agent_create_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["AgentCreateResponse"] - - -class AgentCreateResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from .api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agent_delete_response.py b/src/digitalocean_genai_sdk/types/agent_delete_response.py deleted file mode 100644 index eb1d440d..00000000 --- a/src/digitalocean_genai_sdk/types/agent_delete_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["AgentDeleteResponse"] - - -class AgentDeleteResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from .api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agent_list_params.py b/src/digitalocean_genai_sdk/types/agent_list_params.py deleted file mode 100644 index e13a10c9..00000000 --- a/src/digitalocean_genai_sdk/types/agent_list_params.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["AgentListParams"] - - -class AgentListParams(TypedDict, total=False): - only_deployed: bool - """only list agents that are deployed.""" - - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/agent_list_response.py b/src/digitalocean_genai_sdk/types/agent_list_response.py deleted file mode 100644 index 4cedbb39..00000000 --- a/src/digitalocean_genai_sdk/types/agent_list_response.py +++ /dev/null @@ -1,198 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from .._models import BaseModel -from .api_model import APIModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks -from .api_knowledge_base import APIKnowledgeBase -from .api_retrieval_method import APIRetrievalMethod -from .api_deployment_visibility import APIDeploymentVisibility - -__all__ = [ - "AgentListResponse", - "Agent", - "AgentChatbot", - "AgentChatbotIdentifier", - "AgentDeployment", - "AgentTemplate", - "AgentTemplateGuardrail", -] - - -class AgentChatbot(BaseModel): - button_background_color: Optional[str] = None - - logo: Optional[str] = None - - name: Optional[str] = None - - primary_color: Optional[str] = None - - secondary_color: Optional[str] = None - - starting_message: Optional[str] = None - - -class AgentChatbotIdentifier(BaseModel): - agent_chatbot_identifier: Optional[str] = None - - -class AgentDeployment(BaseModel): - created_at: Optional[datetime] = None - - name: Optional[str] = None - - status: Optional[ - Literal[ - "STATUS_UNKNOWN", - "STATUS_WAITING_FOR_DEPLOYMENT", - "STATUS_DEPLOYING", - "STATUS_RUNNING", - "STATUS_FAILED", - "STATUS_WAITING_FOR_UNDEPLOYMENT", - "STATUS_UNDEPLOYING", - "STATUS_UNDEPLOYMENT_FAILED", - "STATUS_DELETED", - ] - ] 
= None - - updated_at: Optional[datetime] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - visibility: Optional[APIDeploymentVisibility] = None - - -class AgentTemplateGuardrail(BaseModel): - priority: Optional[int] = None - - uuid: Optional[str] = None - - -class AgentTemplate(BaseModel): - created_at: Optional[datetime] = None - - description: Optional[str] = None - - guardrails: Optional[List[AgentTemplateGuardrail]] = None - - instruction: Optional[str] = None - - k: Optional[int] = None - - knowledge_bases: Optional[List[APIKnowledgeBase]] = None - - long_description: Optional[str] = None - - max_tokens: Optional[int] = None - - model: Optional[APIModel] = None - - name: Optional[str] = None - - short_description: Optional[str] = None - - summary: Optional[str] = None - - tags: Optional[List[str]] = None - - temperature: Optional[float] = None - - template_type: Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None - - top_p: Optional[float] = None - - updated_at: Optional[datetime] = None - - uuid: Optional[str] = None - - -class Agent(BaseModel): - chatbot: Optional[AgentChatbot] = None - - chatbot_identifiers: Optional[List[AgentChatbotIdentifier]] = None - - created_at: Optional[datetime] = None - - deployment: Optional[AgentDeployment] = None - - description: Optional[str] = None - - if_case: Optional[str] = None - - instruction: Optional[str] = None - """Agent instruction. - - Instructions help your agent to perform its job effectively. See - [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) - for best practices. - """ - - k: Optional[int] = None - - max_tokens: Optional[int] = None - """ - Specifies the maximum number of tokens the model can process in a single input - or output, set as a number between 1 and 512. This determines the length of each - response. 
- """ - - model: Optional[APIModel] = None - - name: Optional[str] = None - - project_id: Optional[str] = None - - provide_citations: Optional[bool] = None - - region: Optional[str] = None - - retrieval_method: Optional[APIRetrievalMethod] = None - - route_created_at: Optional[datetime] = None - - route_created_by: Optional[str] = None - - route_name: Optional[str] = None - - route_uuid: Optional[str] = None - - tags: Optional[List[str]] = None - - temperature: Optional[float] = None - """Controls the model’s creativity, specified as a number between 0 and 1. - - Lower values produce more predictable and conservative responses, while higher - values encourage creativity and variation. - """ - - template: Optional[AgentTemplate] = None - - top_p: Optional[float] = None - """ - Defines the cumulative probability threshold for word selection, specified as a - number between 0 and 1. Higher values allow for more diverse outputs, while - lower values ensure focused and coherent responses. - """ - - updated_at: Optional[datetime] = None - - url: Optional[str] = None - - user_id: Optional[str] = None - - uuid: Optional[str] = None - - -class AgentListResponse(BaseModel): - agents: Optional[List[Agent]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/agent_retrieve_response.py b/src/digitalocean_genai_sdk/types/agent_retrieve_response.py deleted file mode 100644 index 2eed88af..00000000 --- a/src/digitalocean_genai_sdk/types/agent_retrieve_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["AgentRetrieveResponse"] - - -class AgentRetrieveResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from .api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agent_update_params.py b/src/digitalocean_genai_sdk/types/agent_update_params.py deleted file mode 100644 index 85f9a9c2..00000000 --- a/src/digitalocean_genai_sdk/types/agent_update_params.py +++ /dev/null @@ -1,65 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Annotated, TypedDict - -from .._utils import PropertyInfo -from .api_retrieval_method import APIRetrievalMethod - -__all__ = ["AgentUpdateParams"] - - -class AgentUpdateParams(TypedDict, total=False): - anthropic_key_uuid: str - - description: str - - instruction: str - """Agent instruction. - - Instructions help your agent to perform its job effectively. See - [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) - for best practices. - """ - - k: int - - max_tokens: int - """ - Specifies the maximum number of tokens the model can process in a single input - or output, set as a number between 1 and 512. This determines the length of each - response. - """ - - model_uuid: str - """Identifier for the foundation model.""" - - name: str - - openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")] - - project_id: str - - provide_citations: bool - - retrieval_method: APIRetrievalMethod - - tags: List[str] - - temperature: float - """Controls the model’s creativity, specified as a number between 0 and 1. - - Lower values produce more predictable and conservative responses, while higher - values encourage creativity and variation. 
- """ - - top_p: float - """ - Defines the cumulative probability threshold for word selection, specified as a - number between 0 and 1. Higher values allow for more diverse outputs, while - lower values ensure focused and coherent responses. - """ - - body_uuid: Annotated[str, PropertyInfo(alias="uuid")] diff --git a/src/digitalocean_genai_sdk/types/agent_update_response.py b/src/digitalocean_genai_sdk/types/agent_update_response.py deleted file mode 100644 index 2948aa1c..00000000 --- a/src/digitalocean_genai_sdk/types/agent_update_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["AgentUpdateResponse"] - - -class AgentUpdateResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from .api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agent_update_status_params.py b/src/digitalocean_genai_sdk/types/agent_update_status_params.py deleted file mode 100644 index a0cdc0b9..00000000 --- a/src/digitalocean_genai_sdk/types/agent_update_status_params.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from .._utils import PropertyInfo -from .api_deployment_visibility import APIDeploymentVisibility - -__all__ = ["AgentUpdateStatusParams"] - - -class AgentUpdateStatusParams(TypedDict, total=False): - body_uuid: Annotated[str, PropertyInfo(alias="uuid")] - - visibility: APIDeploymentVisibility diff --git a/src/digitalocean_genai_sdk/types/agent_update_status_response.py b/src/digitalocean_genai_sdk/types/agent_update_status_response.py deleted file mode 100644 index b200f99d..00000000 --- a/src/digitalocean_genai_sdk/types/agent_update_status_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["AgentUpdateStatusResponse"] - - -class AgentUpdateStatusResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from .api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agents/__init__.py b/src/digitalocean_genai_sdk/types/agents/__init__.py deleted file mode 100644 index aae0ee6b..00000000 --- a/src/digitalocean_genai_sdk/types/agents/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from .api_meta import APIMeta as APIMeta -from .api_links import APILinks as APILinks -from .api_key_list_params import APIKeyListParams as APIKeyListParams -from .version_list_params import VersionListParams as VersionListParams -from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams -from .api_key_list_response import APIKeyListResponse as APIKeyListResponse -from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams -from .version_list_response import VersionListResponse as VersionListResponse -from .version_update_params import VersionUpdateParams as VersionUpdateParams -from .child_agent_add_params import ChildAgentAddParams as ChildAgentAddParams -from .function_create_params import FunctionCreateParams as FunctionCreateParams -from .function_update_params import FunctionUpdateParams as FunctionUpdateParams -from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse -from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse -from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse -from .version_update_response import VersionUpdateResponse as VersionUpdateResponse -from .child_agent_add_response import ChildAgentAddResponse as ChildAgentAddResponse -from .function_create_response import FunctionCreateResponse as FunctionCreateResponse -from .function_delete_response import FunctionDeleteResponse as FunctionDeleteResponse -from .function_update_response import FunctionUpdateResponse as FunctionUpdateResponse -from .child_agent_update_params import ChildAgentUpdateParams as ChildAgentUpdateParams -from .child_agent_view_response import ChildAgentViewResponse as ChildAgentViewResponse -from .api_key_regenerate_response import APIKeyRegenerateResponse as APIKeyRegenerateResponse -from .child_agent_delete_response import ChildAgentDeleteResponse as ChildAgentDeleteResponse -from .child_agent_update_response import 
ChildAgentUpdateResponse as ChildAgentUpdateResponse -from .api_link_knowledge_base_output import APILinkKnowledgeBaseOutput as APILinkKnowledgeBaseOutput -from .knowledge_base_detach_response import KnowledgeBaseDetachResponse as KnowledgeBaseDetachResponse diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_create_params.py b/src/digitalocean_genai_sdk/types/agents/api_key_create_params.py deleted file mode 100644 index c3fc44cd..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_key_create_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["APIKeyCreateParams"] - - -class APIKeyCreateParams(TypedDict, total=False): - body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] - - name: str diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_create_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_create_response.py deleted file mode 100644 index 09689fe7..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_key_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from ..api_agent_api_key_info import APIAgentAPIKeyInfo - -__all__ = ["APIKeyCreateResponse"] - - -class APIKeyCreateResponse(BaseModel): - api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_delete_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_delete_response.py deleted file mode 100644 index 02b03f61..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_key_delete_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..._models import BaseModel -from ..api_agent_api_key_info import APIAgentAPIKeyInfo - -__all__ = ["APIKeyDeleteResponse"] - - -class APIKeyDeleteResponse(BaseModel): - api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_list_params.py b/src/digitalocean_genai_sdk/types/agents/api_key_list_params.py deleted file mode 100644 index 11da9398..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_key_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["APIKeyListParams"] - - -class APIKeyListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_list_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_list_response.py deleted file mode 100644 index eff98649..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_key_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from .api_meta import APIMeta -from ..._models import BaseModel -from .api_links import APILinks -from ..api_agent_api_key_info import APIAgentAPIKeyInfo - -__all__ = ["APIKeyListResponse"] - - -class APIKeyListResponse(BaseModel): - api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_regenerate_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_regenerate_response.py deleted file mode 100644 index ea2f761e..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_key_regenerate_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from ..api_agent_api_key_info import APIAgentAPIKeyInfo - -__all__ = ["APIKeyRegenerateResponse"] - - -class APIKeyRegenerateResponse(BaseModel): - api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_update_params.py b/src/digitalocean_genai_sdk/types/agents/api_key_update_params.py deleted file mode 100644 index b49ebb38..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_key_update_params.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["APIKeyUpdateParams"] - - -class APIKeyUpdateParams(TypedDict, total=False): - path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]] - - body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] - - body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] - - name: str diff --git a/src/digitalocean_genai_sdk/types/agents/api_key_update_response.py b/src/digitalocean_genai_sdk/types/agents/api_key_update_response.py deleted file mode 100644 index 87442329..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_key_update_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from ..api_agent_api_key_info import APIAgentAPIKeyInfo - -__all__ = ["APIKeyUpdateResponse"] - - -class APIKeyUpdateResponse(BaseModel): - api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/agents/api_link_knowledge_base_output.py b/src/digitalocean_genai_sdk/types/agents/api_link_knowledge_base_output.py deleted file mode 100644 index a38f021b..00000000 --- a/src/digitalocean_genai_sdk/types/agents/api_link_knowledge_base_output.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["APILinkKnowledgeBaseOutput"] - - -class APILinkKnowledgeBaseOutput(BaseModel): - agent: Optional["APIAgent"] = None - - -from ..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agents/child_agent_add_params.py b/src/digitalocean_genai_sdk/types/agents/child_agent_add_params.py deleted file mode 100644 index 001baa6f..00000000 --- a/src/digitalocean_genai_sdk/types/agents/child_agent_add_params.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["ChildAgentAddParams"] - - -class ChildAgentAddParams(TypedDict, total=False): - path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] - - body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] - - if_case: str - - body_parent_agent_uuid: Annotated[str, PropertyInfo(alias="parent_agent_uuid")] - """A unique identifier for the parent agent.""" - - route_name: str diff --git a/src/digitalocean_genai_sdk/types/agents/child_agent_add_response.py b/src/digitalocean_genai_sdk/types/agents/child_agent_add_response.py deleted file mode 100644 index baccec10..00000000 --- a/src/digitalocean_genai_sdk/types/agents/child_agent_add_response.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["ChildAgentAddResponse"] - - -class ChildAgentAddResponse(BaseModel): - child_agent_uuid: Optional[str] = None - - parent_agent_uuid: Optional[str] = None - """A unique identifier for the parent agent.""" diff --git a/src/digitalocean_genai_sdk/types/agents/child_agent_delete_response.py b/src/digitalocean_genai_sdk/types/agents/child_agent_delete_response.py deleted file mode 100644 index b50fb024..00000000 --- a/src/digitalocean_genai_sdk/types/agents/child_agent_delete_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["ChildAgentDeleteResponse"] - - -class ChildAgentDeleteResponse(BaseModel): - child_agent_uuid: Optional[str] = None - - parent_agent_uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/agents/child_agent_update_params.py b/src/digitalocean_genai_sdk/types/agents/child_agent_update_params.py deleted file mode 100644 index 2f009a52..00000000 --- a/src/digitalocean_genai_sdk/types/agents/child_agent_update_params.py +++ /dev/null @@ -1,24 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["ChildAgentUpdateParams"] - - -class ChildAgentUpdateParams(TypedDict, total=False): - path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] - - body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] - - if_case: str - - body_parent_agent_uuid: Annotated[str, PropertyInfo(alias="parent_agent_uuid")] - """A unique identifier for the parent agent.""" - - route_name: str - - uuid: str diff --git a/src/digitalocean_genai_sdk/types/agents/child_agent_update_response.py b/src/digitalocean_genai_sdk/types/agents/child_agent_update_response.py deleted file mode 100644 index 48a13c72..00000000 --- a/src/digitalocean_genai_sdk/types/agents/child_agent_update_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["ChildAgentUpdateResponse"] - - -class ChildAgentUpdateResponse(BaseModel): - child_agent_uuid: Optional[str] = None - - parent_agent_uuid: Optional[str] = None - """A unique identifier for the parent agent.""" - - rollback: Optional[bool] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/agents/child_agent_view_response.py b/src/digitalocean_genai_sdk/types/agents/child_agent_view_response.py deleted file mode 100644 index ffbaef12..00000000 --- a/src/digitalocean_genai_sdk/types/agents/child_agent_view_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import List, Optional - -from ..._models import BaseModel - -__all__ = ["ChildAgentViewResponse"] - - -class ChildAgentViewResponse(BaseModel): - children: Optional[List["APIAgent"]] = None - - -from ..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agents/function_create_params.py b/src/digitalocean_genai_sdk/types/agents/function_create_params.py deleted file mode 100644 index 938fb1d5..00000000 --- a/src/digitalocean_genai_sdk/types/agents/function_create_params.py +++ /dev/null @@ -1,25 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["FunctionCreateParams"] - - -class FunctionCreateParams(TypedDict, total=False): - body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] - - description: str - - faas_name: str - - faas_namespace: str - - function_name: str - - input_schema: object - - output_schema: object diff --git a/src/digitalocean_genai_sdk/types/agents/function_create_response.py b/src/digitalocean_genai_sdk/types/agents/function_create_response.py deleted file mode 100644 index 82ab984b..00000000 --- a/src/digitalocean_genai_sdk/types/agents/function_create_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["FunctionCreateResponse"] - - -class FunctionCreateResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from ..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agents/function_delete_response.py b/src/digitalocean_genai_sdk/types/agents/function_delete_response.py deleted file mode 100644 index 678ef62d..00000000 --- a/src/digitalocean_genai_sdk/types/agents/function_delete_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["FunctionDeleteResponse"] - - -class FunctionDeleteResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from ..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agents/function_update_params.py b/src/digitalocean_genai_sdk/types/agents/function_update_params.py deleted file mode 100644 index 2fa8e8f0..00000000 --- a/src/digitalocean_genai_sdk/types/agents/function_update_params.py +++ /dev/null @@ -1,29 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Required, Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["FunctionUpdateParams"] - - -class FunctionUpdateParams(TypedDict, total=False): - path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]] - - body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] - - description: str - - faas_name: str - - faas_namespace: str - - function_name: str - - body_function_uuid: Annotated[str, PropertyInfo(alias="function_uuid")] - - input_schema: object - - output_schema: object diff --git a/src/digitalocean_genai_sdk/types/agents/function_update_response.py b/src/digitalocean_genai_sdk/types/agents/function_update_response.py deleted file mode 100644 index 82fc63be..00000000 --- a/src/digitalocean_genai_sdk/types/agents/function_update_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["FunctionUpdateResponse"] - - -class FunctionUpdateResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from ..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/agents/knowledge_base_detach_response.py b/src/digitalocean_genai_sdk/types/agents/knowledge_base_detach_response.py deleted file mode 100644 index 76bb4236..00000000 --- a/src/digitalocean_genai_sdk/types/agents/knowledge_base_detach_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["KnowledgeBaseDetachResponse"] - - -class KnowledgeBaseDetachResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from ..api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/api_agent.py b/src/digitalocean_genai_sdk/types/api_agent.py deleted file mode 100644 index d6e18ca2..00000000 --- a/src/digitalocean_genai_sdk/types/api_agent.py +++ /dev/null @@ -1,263 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from .._models import BaseModel -from .api_model import APIModel -from .api_knowledge_base import APIKnowledgeBase -from .api_retrieval_method import APIRetrievalMethod -from .api_agent_api_key_info import APIAgentAPIKeyInfo -from .api_openai_api_key_info import APIOpenAIAPIKeyInfo -from .api_deployment_visibility import APIDeploymentVisibility -from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo - -__all__ = [ - "APIAgent", - "APIKey", - "Chatbot", - "ChatbotIdentifier", - "Deployment", - "Function", - "Guardrail", - "Template", - "TemplateGuardrail", -] - - -class APIKey(BaseModel): - api_key: Optional[str] = None - - -class Chatbot(BaseModel): - button_background_color: Optional[str] = None - - logo: Optional[str] = None - - name: Optional[str] = None - - primary_color: Optional[str] = None - - secondary_color: Optional[str] = None - - starting_message: Optional[str] = None - - -class ChatbotIdentifier(BaseModel): - agent_chatbot_identifier: Optional[str] = None - - -class Deployment(BaseModel): - created_at: Optional[datetime] = None - - name: Optional[str] = None - - status: Optional[ - Literal[ - "STATUS_UNKNOWN", - "STATUS_WAITING_FOR_DEPLOYMENT", - "STATUS_DEPLOYING", - "STATUS_RUNNING", - "STATUS_FAILED", - 
"STATUS_WAITING_FOR_UNDEPLOYMENT", - "STATUS_UNDEPLOYING", - "STATUS_UNDEPLOYMENT_FAILED", - "STATUS_DELETED", - ] - ] = None - - updated_at: Optional[datetime] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - visibility: Optional[APIDeploymentVisibility] = None - - -class Function(BaseModel): - api_key: Optional[str] = None - - created_at: Optional[datetime] = None - - created_by: Optional[str] = None - - description: Optional[str] = None - - faas_name: Optional[str] = None - - faas_namespace: Optional[str] = None - - input_schema: Optional[object] = None - - name: Optional[str] = None - - output_schema: Optional[object] = None - - updated_at: Optional[datetime] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - -class Guardrail(BaseModel): - agent_uuid: Optional[str] = None - - created_at: Optional[datetime] = None - - default_response: Optional[str] = None - - description: Optional[str] = None - - guardrail_uuid: Optional[str] = None - - is_attached: Optional[bool] = None - - is_default: Optional[bool] = None - - metadata: Optional[object] = None - - name: Optional[str] = None - - priority: Optional[int] = None - - type: Optional[ - Literal[ - "GUARDRAIL_TYPE_UNKNOWN", - "GUARDRAIL_TYPE_JAILBREAK", - "GUARDRAIL_TYPE_SENSITIVE_DATA", - "GUARDRAIL_TYPE_CONTENT_MODERATION", - ] - ] = None - - updated_at: Optional[datetime] = None - - uuid: Optional[str] = None - - -class TemplateGuardrail(BaseModel): - priority: Optional[int] = None - - uuid: Optional[str] = None - - -class Template(BaseModel): - created_at: Optional[datetime] = None - - description: Optional[str] = None - - guardrails: Optional[List[TemplateGuardrail]] = None - - instruction: Optional[str] = None - - k: Optional[int] = None - - knowledge_bases: Optional[List[APIKnowledgeBase]] = None - - long_description: Optional[str] = None - - max_tokens: Optional[int] = None - - model: Optional[APIModel] = None - - name: Optional[str] = None - - short_description: 
Optional[str] = None - - summary: Optional[str] = None - - tags: Optional[List[str]] = None - - temperature: Optional[float] = None - - template_type: Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None - - top_p: Optional[float] = None - - updated_at: Optional[datetime] = None - - uuid: Optional[str] = None - - -class APIAgent(BaseModel): - anthropic_api_key: Optional[APIAnthropicAPIKeyInfo] = None - - api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None - - api_keys: Optional[List[APIKey]] = None - - chatbot: Optional[Chatbot] = None - - chatbot_identifiers: Optional[List[ChatbotIdentifier]] = None - - child_agents: Optional[List["APIAgent"]] = None - - created_at: Optional[datetime] = None - - deployment: Optional[Deployment] = None - - description: Optional[str] = None - - functions: Optional[List[Function]] = None - - guardrails: Optional[List[Guardrail]] = None - - if_case: Optional[str] = None - - instruction: Optional[str] = None - """Agent instruction. - - Instructions help your agent to perform its job effectively. See - [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) - for best practices. 
- """ - - k: Optional[int] = None - - knowledge_bases: Optional[List[APIKnowledgeBase]] = None - - max_tokens: Optional[int] = None - - model: Optional[APIModel] = None - - name: Optional[str] = None - - openai_api_key: Optional[APIOpenAIAPIKeyInfo] = None - - parent_agents: Optional[List["APIAgent"]] = None - - project_id: Optional[str] = None - - provide_citations: Optional[bool] = None - - region: Optional[str] = None - - retrieval_method: Optional[APIRetrievalMethod] = None - - route_created_at: Optional[datetime] = None - - route_created_by: Optional[str] = None - - route_name: Optional[str] = None - - route_uuid: Optional[str] = None - - tags: Optional[List[str]] = None - - temperature: Optional[float] = None - - template: Optional[Template] = None - - top_p: Optional[float] = None - - updated_at: Optional[datetime] = None - - url: Optional[str] = None - - user_id: Optional[str] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/api_agent_api_key_info.py b/src/digitalocean_genai_sdk/types/api_agent_api_key_info.py deleted file mode 100644 index 8dc71564..00000000 --- a/src/digitalocean_genai_sdk/types/api_agent_api_key_info.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional -from datetime import datetime - -from .._models import BaseModel - -__all__ = ["APIAgentAPIKeyInfo"] - - -class APIAgentAPIKeyInfo(BaseModel): - created_at: Optional[datetime] = None - - created_by: Optional[str] = None - - deleted_at: Optional[datetime] = None - - name: Optional[str] = None - - secret_key: Optional[str] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/api_agreement.py b/src/digitalocean_genai_sdk/types/api_agreement.py deleted file mode 100644 index c4359f1f..00000000 --- a/src/digitalocean_genai_sdk/types/api_agreement.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["APIAgreement"] - - -class APIAgreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/api_anthropic_api_key_info.py b/src/digitalocean_genai_sdk/types/api_anthropic_api_key_info.py deleted file mode 100644 index e2e04a8a..00000000 --- a/src/digitalocean_genai_sdk/types/api_anthropic_api_key_info.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional -from datetime import datetime - -from .._models import BaseModel - -__all__ = ["APIAnthropicAPIKeyInfo"] - - -class APIAnthropicAPIKeyInfo(BaseModel): - created_at: Optional[datetime] = None - - created_by: Optional[str] = None - - deleted_at: Optional[datetime] = None - - name: Optional[str] = None - - updated_at: Optional[datetime] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/api_indexing_job.py b/src/digitalocean_genai_sdk/types/api_indexing_job.py deleted file mode 100644 index f24aac94..00000000 --- a/src/digitalocean_genai_sdk/types/api_indexing_job.py +++ /dev/null @@ -1,43 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["APIIndexingJob"] - - -class APIIndexingJob(BaseModel): - completed_datasources: Optional[int] = None - - created_at: Optional[datetime] = None - - data_source_uuids: Optional[List[str]] = None - - finished_at: Optional[datetime] = None - - knowledge_base_uuid: Optional[str] = None - - phase: Optional[ - Literal[ - "BATCH_JOB_PHASE_UNKNOWN", - "BATCH_JOB_PHASE_PENDING", - "BATCH_JOB_PHASE_RUNNING", - "BATCH_JOB_PHASE_SUCCEEDED", - "BATCH_JOB_PHASE_FAILED", - "BATCH_JOB_PHASE_ERROR", - "BATCH_JOB_PHASE_CANCELLED", - ] - ] = None - - started_at: Optional[datetime] = None - - tokens: Optional[int] = None - - total_datasources: Optional[int] = None - - updated_at: Optional[datetime] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/api_key_list_params.py b/src/digitalocean_genai_sdk/types/api_key_list_params.py deleted file mode 100644 index a1ab60dc..00000000 --- a/src/digitalocean_genai_sdk/types/api_key_list_params.py +++ /dev/null @@ -1,42 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, TypedDict - -__all__ = ["APIKeyListParams"] - - -class APIKeyListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" - - public_only: bool - """only include models that are publicly available.""" - - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - """include only models defined for the listed usecases. - - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - """ diff --git a/src/digitalocean_genai_sdk/types/api_key_list_response.py b/src/digitalocean_genai_sdk/types/api_key_list_response.py deleted file mode 100644 index 360de7a4..00000000 --- a/src/digitalocean_genai_sdk/types/api_key_list_response.py +++ /dev/null @@ -1,42 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional -from datetime import datetime - -from .._models import BaseModel -from .api_agreement import APIAgreement -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks -from .api_model_version import APIModelVersion - -__all__ = ["APIKeyListResponse", "Model"] - - -class Model(BaseModel): - agreement: Optional[APIAgreement] = None - - created_at: Optional[datetime] = None - - is_foundational: Optional[bool] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - version: Optional[APIModelVersion] = None - - -class APIKeyListResponse(BaseModel): - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None - - models: Optional[List[Model]] = None diff --git a/src/digitalocean_genai_sdk/types/api_keys/__init__.py b/src/digitalocean_genai_sdk/types/api_keys/__init__.py deleted file mode 100644 index c3cbcd6d..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from .api_key_list_params import APIKeyListParams as APIKeyListParams -from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams -from .api_key_list_response import APIKeyListResponse as APIKeyListResponse -from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams -from .api_model_api_key_info import APIModelAPIKeyInfo as APIModelAPIKeyInfo -from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse -from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse -from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse -from .api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse as APIKeyUpdateRegenerateResponse diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_key_create_params.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_create_params.py deleted file mode 100644 index 16cc23c9..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_key_create_params.py +++ /dev/null @@ -1,11 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["APIKeyCreateParams"] - - -class APIKeyCreateParams(TypedDict, total=False): - name: str diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_key_create_response.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_create_response.py deleted file mode 100644 index 654e9f1e..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_key_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..._models import BaseModel -from .api_model_api_key_info import APIModelAPIKeyInfo - -__all__ = ["APIKeyCreateResponse"] - - -class APIKeyCreateResponse(BaseModel): - api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_key_delete_response.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_delete_response.py deleted file mode 100644 index 4d81d047..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_key_delete_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from .api_model_api_key_info import APIModelAPIKeyInfo - -__all__ = ["APIKeyDeleteResponse"] - - -class APIKeyDeleteResponse(BaseModel): - api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_key_list_params.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_list_params.py deleted file mode 100644 index 11da9398..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_key_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["APIKeyListParams"] - - -class APIKeyListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_key_list_response.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_list_response.py deleted file mode 100644 index 535e2f96..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_key_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from ..._models import BaseModel -from ..agents.api_meta import APIMeta -from ..agents.api_links import APILinks -from .api_model_api_key_info import APIModelAPIKeyInfo - -__all__ = ["APIKeyListResponse"] - - -class APIKeyListResponse(BaseModel): - api_key_infos: Optional[List[APIModelAPIKeyInfo]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_key_update_params.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_update_params.py deleted file mode 100644 index 23c1c0b9..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_key_update_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ..._utils import PropertyInfo - -__all__ = ["APIKeyUpdateParams"] - - -class APIKeyUpdateParams(TypedDict, total=False): - body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] - - name: str diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_key_update_regenerate_response.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_update_regenerate_response.py deleted file mode 100644 index 44a316dc..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_key_update_regenerate_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..._models import BaseModel -from .api_model_api_key_info import APIModelAPIKeyInfo - -__all__ = ["APIKeyUpdateRegenerateResponse"] - - -class APIKeyUpdateRegenerateResponse(BaseModel): - api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_key_update_response.py b/src/digitalocean_genai_sdk/types/api_keys/api_key_update_response.py deleted file mode 100644 index 3671addf..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_key_update_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from .api_model_api_key_info import APIModelAPIKeyInfo - -__all__ = ["APIKeyUpdateResponse"] - - -class APIKeyUpdateResponse(BaseModel): - api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/api_keys/api_model_api_key_info.py b/src/digitalocean_genai_sdk/types/api_keys/api_model_api_key_info.py deleted file mode 100644 index bf354a47..00000000 --- a/src/digitalocean_genai_sdk/types/api_keys/api_model_api_key_info.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional -from datetime import datetime - -from ..._models import BaseModel - -__all__ = ["APIModelAPIKeyInfo"] - - -class APIModelAPIKeyInfo(BaseModel): - created_at: Optional[datetime] = None - - created_by: Optional[str] = None - - deleted_at: Optional[datetime] = None - - name: Optional[str] = None - - secret_key: Optional[str] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/api_knowledge_base.py b/src/digitalocean_genai_sdk/types/api_knowledge_base.py deleted file mode 100644 index 5b4b6e2c..00000000 --- a/src/digitalocean_genai_sdk/types/api_knowledge_base.py +++ /dev/null @@ -1,37 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from datetime import datetime - -from .._models import BaseModel -from .api_indexing_job import APIIndexingJob - -__all__ = ["APIKnowledgeBase"] - - -class APIKnowledgeBase(BaseModel): - added_to_agent_at: Optional[datetime] = None - - created_at: Optional[datetime] = None - - database_id: Optional[str] = None - - embedding_model_uuid: Optional[str] = None - - is_public: Optional[bool] = None - - last_indexing_job: Optional[APIIndexingJob] = None - - name: Optional[str] = None - - project_id: Optional[str] = None - - region: Optional[str] = None - - tags: Optional[List[str]] = None - - updated_at: Optional[datetime] = None - - user_id: Optional[str] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/api_model.py b/src/digitalocean_genai_sdk/types/api_model.py deleted file mode 100644 index d680a638..00000000 --- a/src/digitalocean_genai_sdk/types/api_model.py +++ /dev/null @@ -1,57 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from .._models import BaseModel -from .api_agreement import APIAgreement -from .api_model_version import APIModelVersion - -__all__ = ["APIModel"] - - -class APIModel(BaseModel): - agreement: Optional[APIAgreement] = None - - created_at: Optional[datetime] = None - - inference_name: Optional[str] = None - - inference_version: Optional[str] = None - - is_foundational: Optional[bool] = None - - metadata: Optional[object] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( - None - ) - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - usecases: Optional[ - List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - ] = None - - uuid: Optional[str] = None - - version: Optional[APIModelVersion] = None diff --git a/src/digitalocean_genai_sdk/types/api_model_version.py b/src/digitalocean_genai_sdk/types/api_model_version.py deleted file mode 100644 index 2e118632..00000000 --- a/src/digitalocean_genai_sdk/types/api_model_version.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from .._models import BaseModel - -__all__ = ["APIModelVersion"] - - -class APIModelVersion(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None diff --git a/src/digitalocean_genai_sdk/types/api_openai_api_key_info.py b/src/digitalocean_genai_sdk/types/api_openai_api_key_info.py deleted file mode 100644 index 39328f80..00000000 --- a/src/digitalocean_genai_sdk/types/api_openai_api_key_info.py +++ /dev/null @@ -1,25 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from datetime import datetime - -from .._models import BaseModel -from .api_model import APIModel - -__all__ = ["APIOpenAIAPIKeyInfo"] - - -class APIOpenAIAPIKeyInfo(BaseModel): - created_at: Optional[datetime] = None - - created_by: Optional[str] = None - - deleted_at: Optional[datetime] = None - - models: Optional[List[APIModel]] = None - - name: Optional[str] = None - - updated_at: Optional[datetime] = None - - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/auth/agents/__init__.py b/src/digitalocean_genai_sdk/types/auth/agents/__init__.py deleted file mode 100644 index 9fae55b6..00000000 --- a/src/digitalocean_genai_sdk/types/auth/agents/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .token_create_params import TokenCreateParams as TokenCreateParams -from .token_create_response import TokenCreateResponse as TokenCreateResponse diff --git a/src/digitalocean_genai_sdk/types/auth/agents/token_create_params.py b/src/digitalocean_genai_sdk/types/auth/agents/token_create_params.py deleted file mode 100644 index 0df640f9..00000000 --- a/src/digitalocean_genai_sdk/types/auth/agents/token_create_params.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ...._utils import PropertyInfo - -__all__ = ["TokenCreateParams"] - - -class TokenCreateParams(TypedDict, total=False): - body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] diff --git a/src/digitalocean_genai_sdk/types/auth/agents/token_create_response.py b/src/digitalocean_genai_sdk/types/auth/agents/token_create_response.py deleted file mode 100644 index e58b7399..00000000 --- a/src/digitalocean_genai_sdk/types/auth/agents/token_create_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel - -__all__ = ["TokenCreateResponse"] - - -class TokenCreateResponse(BaseModel): - access_token: Optional[str] = None - - refresh_token: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/indexing_job_create_params.py b/src/digitalocean_genai_sdk/types/indexing_job_create_params.py deleted file mode 100644 index 04838472..00000000 --- a/src/digitalocean_genai_sdk/types/indexing_job_create_params.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import TypedDict - -__all__ = ["IndexingJobCreateParams"] - - -class IndexingJobCreateParams(TypedDict, total=False): - data_source_uuids: List[str] - - knowledge_base_uuid: str diff --git a/src/digitalocean_genai_sdk/types/indexing_job_create_response.py b/src/digitalocean_genai_sdk/types/indexing_job_create_response.py deleted file mode 100644 index 839bc83b..00000000 --- a/src/digitalocean_genai_sdk/types/indexing_job_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from .._models import BaseModel -from .api_indexing_job import APIIndexingJob - -__all__ = ["IndexingJobCreateResponse"] - - -class IndexingJobCreateResponse(BaseModel): - job: Optional[APIIndexingJob] = None diff --git a/src/digitalocean_genai_sdk/types/indexing_job_list_params.py b/src/digitalocean_genai_sdk/types/indexing_job_list_params.py deleted file mode 100644 index 90206aba..00000000 --- a/src/digitalocean_genai_sdk/types/indexing_job_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["IndexingJobListParams"] - - -class IndexingJobListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/indexing_job_list_response.py b/src/digitalocean_genai_sdk/types/indexing_job_list_response.py deleted file mode 100644 index 1379cc55..00000000 --- a/src/digitalocean_genai_sdk/types/indexing_job_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from .._models import BaseModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks -from .api_indexing_job import APIIndexingJob - -__all__ = ["IndexingJobListResponse"] - - -class IndexingJobListResponse(BaseModel): - jobs: Optional[List[APIIndexingJob]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/indexing_job_retrieve_data_sources_response.py b/src/digitalocean_genai_sdk/types/indexing_job_retrieve_data_sources_response.py deleted file mode 100644 index b178b984..00000000 --- a/src/digitalocean_genai_sdk/types/indexing_job_retrieve_data_sources_response.py +++ /dev/null @@ -1,52 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["IndexingJobRetrieveDataSourcesResponse", "IndexedDataSource"] - - -class IndexedDataSource(BaseModel): - completed_at: Optional[datetime] = None - - data_source_uuid: Optional[str] = None - - error_details: Optional[str] = None - - error_msg: Optional[str] = None - - failed_item_count: Optional[str] = None - - indexed_file_count: Optional[str] = None - - indexed_item_count: Optional[str] = None - - removed_item_count: Optional[str] = None - - skipped_item_count: Optional[str] = None - - started_at: Optional[datetime] = None - - status: Optional[ - Literal[ - "DATA_SOURCE_STATUS_UNKNOWN", - "DATA_SOURCE_STATUS_IN_PROGRESS", - "DATA_SOURCE_STATUS_UPDATED", - "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", - "DATA_SOURCE_STATUS_NOT_UPDATED", - "DATA_SOURCE_STATUS_FAILED", - ] - ] = None - - total_bytes: Optional[str] = None - - total_bytes_indexed: Optional[str] = None - - total_file_count: Optional[str] = None - - -class IndexingJobRetrieveDataSourcesResponse(BaseModel): - indexed_data_sources: 
Optional[List[IndexedDataSource]] = None diff --git a/src/digitalocean_genai_sdk/types/indexing_job_retrieve_response.py b/src/digitalocean_genai_sdk/types/indexing_job_retrieve_response.py deleted file mode 100644 index 95f33d7a..00000000 --- a/src/digitalocean_genai_sdk/types/indexing_job_retrieve_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel -from .api_indexing_job import APIIndexingJob - -__all__ = ["IndexingJobRetrieveResponse"] - - -class IndexingJobRetrieveResponse(BaseModel): - job: Optional[APIIndexingJob] = None diff --git a/src/digitalocean_genai_sdk/types/indexing_job_update_cancel_params.py b/src/digitalocean_genai_sdk/types/indexing_job_update_cancel_params.py deleted file mode 100644 index 4c2848b0..00000000 --- a/src/digitalocean_genai_sdk/types/indexing_job_update_cancel_params.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["IndexingJobUpdateCancelParams"] - - -class IndexingJobUpdateCancelParams(TypedDict, total=False): - body_uuid: Annotated[str, PropertyInfo(alias="uuid")] - """A unique identifier for an indexing job.""" diff --git a/src/digitalocean_genai_sdk/types/indexing_job_update_cancel_response.py b/src/digitalocean_genai_sdk/types/indexing_job_update_cancel_response.py deleted file mode 100644 index d50e1865..00000000 --- a/src/digitalocean_genai_sdk/types/indexing_job_update_cancel_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from .._models import BaseModel -from .api_indexing_job import APIIndexingJob - -__all__ = ["IndexingJobUpdateCancelResponse"] - - -class IndexingJobUpdateCancelResponse(BaseModel): - job: Optional[APIIndexingJob] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_base_create_params.py b/src/digitalocean_genai_sdk/types/knowledge_base_create_params.py deleted file mode 100644 index 3a58166b..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_base_create_params.py +++ /dev/null @@ -1,64 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Iterable -from typing_extensions import TypedDict - -from .knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam -from .knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam -from .knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam - -__all__ = ["KnowledgeBaseCreateParams", "Datasource"] - - -class KnowledgeBaseCreateParams(TypedDict, total=False): - database_id: str - """ - Identifier of the DigitalOcean OpenSearch database this knowledge base will use, - optional. If not provided, we create a new database for the knowledge base in - the same region as the knowledge base. - """ - - datasources: Iterable[Datasource] - """The data sources to use for this knowledge base. - - See - [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets) - for more information on data sources best practices. - """ - - embedding_model_uuid: str - """ - Identifier for the - [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models). 
- """ - - name: str - """Name of the knowledge base.""" - - project_id: str - """Identifier of the DigitalOcean project this knowledge base will belong to.""" - - region: str - """The datacenter region to deploy the knowledge base in.""" - - tags: List[str] - """Tags to organize your knowledge base.""" - - vpc_uuid: str - - -class Datasource(TypedDict, total=False): - bucket_name: str - - bucket_region: str - - file_upload_data_source: APIFileUploadDataSourceParam - """File to upload as data source for knowledge base.""" - - item_path: str - - spaces_data_source: APISpacesDataSourceParam - - web_crawler_data_source: APIWebCrawlerDataSourceParam diff --git a/src/digitalocean_genai_sdk/types/knowledge_base_create_response.py b/src/digitalocean_genai_sdk/types/knowledge_base_create_response.py deleted file mode 100644 index cc2d8b9f..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_base_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel -from .api_knowledge_base import APIKnowledgeBase - -__all__ = ["KnowledgeBaseCreateResponse"] - - -class KnowledgeBaseCreateResponse(BaseModel): - knowledge_base: Optional[APIKnowledgeBase] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_base_delete_response.py b/src/digitalocean_genai_sdk/types/knowledge_base_delete_response.py deleted file mode 100644 index 6401e25a..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_base_delete_response.py +++ /dev/null @@ -1,11 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from .._models import BaseModel - -__all__ = ["KnowledgeBaseDeleteResponse"] - - -class KnowledgeBaseDeleteResponse(BaseModel): - uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_base_list_params.py b/src/digitalocean_genai_sdk/types/knowledge_base_list_params.py deleted file mode 100644 index dcf9a0ec..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_base_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KnowledgeBaseListParams"] - - -class KnowledgeBaseListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/knowledge_base_list_response.py b/src/digitalocean_genai_sdk/types/knowledge_base_list_response.py deleted file mode 100644 index 09ca1ad3..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_base_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from .._models import BaseModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks -from .api_knowledge_base import APIKnowledgeBase - -__all__ = ["KnowledgeBaseListResponse"] - - -class KnowledgeBaseListResponse(BaseModel): - knowledge_bases: Optional[List[APIKnowledgeBase]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_base_retrieve_response.py b/src/digitalocean_genai_sdk/types/knowledge_base_retrieve_response.py deleted file mode 100644 index 5a3b5f2c..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_base_retrieve_response.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from .._models import BaseModel -from .api_knowledge_base import APIKnowledgeBase - -__all__ = ["KnowledgeBaseRetrieveResponse"] - - -class KnowledgeBaseRetrieveResponse(BaseModel): - database_status: Optional[ - Literal[ - "CREATING", - "ONLINE", - "POWEROFF", - "REBUILDING", - "REBALANCING", - "DECOMMISSIONED", - "FORKING", - "MIGRATING", - "RESIZING", - "RESTORING", - "POWERING_ON", - "UNHEALTHY", - ] - ] = None - - knowledge_base: Optional[APIKnowledgeBase] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_base_update_params.py b/src/digitalocean_genai_sdk/types/knowledge_base_update_params.py deleted file mode 100644 index 297c79de..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_base_update_params.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Annotated, TypedDict - -from .._utils import PropertyInfo - -__all__ = ["KnowledgeBaseUpdateParams"] - - -class KnowledgeBaseUpdateParams(TypedDict, total=False): - database_id: str - """the id of the DigitalOcean database this knowledge base will use, optiona.""" - - embedding_model_uuid: str - """Identifier for the foundation model.""" - - name: str - - project_id: str - - tags: List[str] - """Tags to organize your knowledge base.""" - - body_uuid: Annotated[str, PropertyInfo(alias="uuid")] diff --git a/src/digitalocean_genai_sdk/types/knowledge_base_update_response.py b/src/digitalocean_genai_sdk/types/knowledge_base_update_response.py deleted file mode 100644 index f3ba2c32..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_base_update_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from .._models import BaseModel -from .api_knowledge_base import APIKnowledgeBase - -__all__ = ["KnowledgeBaseUpdateResponse"] - - -class KnowledgeBaseUpdateResponse(BaseModel): - knowledge_base: Optional[APIKnowledgeBase] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/__init__.py b/src/digitalocean_genai_sdk/types/knowledge_bases/__init__.py deleted file mode 100644 index f5f31034..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .api_spaces_data_source import APISpacesDataSource as APISpacesDataSource -from .data_source_list_params import DataSourceListParams as DataSourceListParams -from .data_source_create_params import DataSourceCreateParams as DataSourceCreateParams -from .data_source_list_response import DataSourceListResponse as DataSourceListResponse -from .api_file_upload_data_source import APIFileUploadDataSource as APIFileUploadDataSource -from .api_web_crawler_data_source import APIWebCrawlerDataSource as APIWebCrawlerDataSource -from .data_source_create_response import DataSourceCreateResponse as DataSourceCreateResponse -from .data_source_delete_response import DataSourceDeleteResponse as DataSourceDeleteResponse -from .api_spaces_data_source_param import APISpacesDataSourceParam as APISpacesDataSourceParam -from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource as APIKnowledgeBaseDataSource -from .api_file_upload_data_source_param import APIFileUploadDataSourceParam as APIFileUploadDataSourceParam -from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam as APIWebCrawlerDataSourceParam diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source.py deleted file 
mode 100644 index 1dcc9639..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["APIFileUploadDataSource"] - - -class APIFileUploadDataSource(BaseModel): - original_file_name: Optional[str] = None - - size_in_bytes: Optional[str] = None - - stored_object_key: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source_param.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source_param.py deleted file mode 100644 index 37221059..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/api_file_upload_data_source_param.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["APIFileUploadDataSourceParam"] - - -class APIFileUploadDataSourceParam(TypedDict, total=False): - original_file_name: str - - size_in_bytes: str - - stored_object_key: str diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/api_knowledge_base_data_source.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_knowledge_base_data_source.py deleted file mode 100644 index df1cd3bb..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/api_knowledge_base_data_source.py +++ /dev/null @@ -1,35 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional -from datetime import datetime - -from ..._models import BaseModel -from ..api_indexing_job import APIIndexingJob -from .api_spaces_data_source import APISpacesDataSource -from .api_file_upload_data_source import APIFileUploadDataSource -from .api_web_crawler_data_source import APIWebCrawlerDataSource - -__all__ = ["APIKnowledgeBaseDataSource"] - - -class APIKnowledgeBaseDataSource(BaseModel): - bucket_name: Optional[str] = None - - created_at: Optional[datetime] = None - - file_upload_data_source: Optional[APIFileUploadDataSource] = None - """File to upload as data source for knowledge base.""" - - item_path: Optional[str] = None - - last_indexing_job: Optional[APIIndexingJob] = None - - region: Optional[str] = None - - spaces_data_source: Optional[APISpacesDataSource] = None - - updated_at: Optional[datetime] = None - - uuid: Optional[str] = None - - web_crawler_data_source: Optional[APIWebCrawlerDataSource] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source.py deleted file mode 100644 index f3a0421a..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["APISpacesDataSource"] - - -class APISpacesDataSource(BaseModel): - bucket_name: Optional[str] = None - - item_path: Optional[str] = None - - region: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source_param.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source_param.py deleted file mode 100644 index b7f2f657..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/api_spaces_data_source_param.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["APISpacesDataSourceParam"] - - -class APISpacesDataSourceParam(TypedDict, total=False): - bucket_name: str - - item_path: str - - region: str diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source.py deleted file mode 100644 index 4690c607..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source.py +++ /dev/null @@ -1,26 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["APIWebCrawlerDataSource"] - - -class APIWebCrawlerDataSource(BaseModel): - base_url: Optional[str] = None - """The base url to crawl.""" - - crawling_option: Optional[Literal["UNKNOWN", "SCOPED", "PATH", "DOMAIN", "SUBDOMAINS"]] = None - """Options for specifying how URLs found on pages should be handled. - - - UNKNOWN: Default unknown value - - SCOPED: Only include the base URL. - - PATH: Crawl the base URL and linked pages within the URL path. - - DOMAIN: Crawl the base URL and linked pages within the same domain. 
- - SUBDOMAINS: Crawl the base URL and linked pages for any subdomain. - """ - - embed_media: Optional[bool] = None - """Whether to ingest and index media (images, etc.) on web pages.""" diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source_param.py b/src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source_param.py deleted file mode 100644 index 2345ed3a..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/api_web_crawler_data_source_param.py +++ /dev/null @@ -1,25 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["APIWebCrawlerDataSourceParam"] - - -class APIWebCrawlerDataSourceParam(TypedDict, total=False): - base_url: str - """The base url to crawl.""" - - crawling_option: Literal["UNKNOWN", "SCOPED", "PATH", "DOMAIN", "SUBDOMAINS"] - """Options for specifying how URLs found on pages should be handled. - - - UNKNOWN: Default unknown value - - SCOPED: Only include the base URL. - - PATH: Crawl the base URL and linked pages within the URL path. - - DOMAIN: Crawl the base URL and linked pages within the same domain. - - SUBDOMAINS: Crawl the base URL and linked pages for any subdomain. - """ - - embed_media: bool - """Whether to ingest and index media (images, etc.) on web pages.""" diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_params.py b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_params.py deleted file mode 100644 index b1abafdf..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_params.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ..._utils import PropertyInfo -from .api_spaces_data_source_param import APISpacesDataSourceParam -from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam - -__all__ = ["DataSourceCreateParams", "AwsDataSource"] - - -class DataSourceCreateParams(TypedDict, total=False): - aws_data_source: AwsDataSource - - body_knowledge_base_uuid: Annotated[str, PropertyInfo(alias="knowledge_base_uuid")] - - spaces_data_source: APISpacesDataSourceParam - - web_crawler_data_source: APIWebCrawlerDataSourceParam - - -class AwsDataSource(TypedDict, total=False): - bucket_name: str - - item_path: str - - key_id: str - - region: str - - secret_key: str diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_response.py b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_response.py deleted file mode 100644 index 1035d3f4..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource - -__all__ = ["DataSourceCreateResponse"] - - -class DataSourceCreateResponse(BaseModel): - knowledge_base_data_source: Optional[APIKnowledgeBaseDataSource] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_delete_response.py b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_delete_response.py deleted file mode 100644 index 53954d7f..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_delete_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["DataSourceDeleteResponse"] - - -class DataSourceDeleteResponse(BaseModel): - data_source_uuid: Optional[str] = None - - knowledge_base_uuid: Optional[str] = None diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_params.py b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_params.py deleted file mode 100644 index e3ed5e3c..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["DataSourceListParams"] - - -class DataSourceListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_response.py b/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_response.py deleted file mode 100644 index 78246ce1..00000000 --- a/src/digitalocean_genai_sdk/types/knowledge_bases/data_source_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from ..._models import BaseModel -from ..agents.api_meta import APIMeta -from ..agents.api_links import APILinks -from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource - -__all__ = ["DataSourceListResponse"] - - -class DataSourceListResponse(BaseModel): - knowledge_base_data_sources: Optional[List[APIKnowledgeBaseDataSource]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/__init__.py b/src/digitalocean_genai_sdk/types/providers/anthropic/__init__.py deleted file mode 100644 index eb47e709..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .key_list_params import KeyListParams as KeyListParams -from .key_create_params import KeyCreateParams as KeyCreateParams -from .key_list_response import KeyListResponse as KeyListResponse -from .key_update_params import KeyUpdateParams as KeyUpdateParams -from .key_create_response import KeyCreateResponse as KeyCreateResponse -from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse -from .key_update_response import KeyUpdateResponse as KeyUpdateResponse -from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse -from .key_list_agents_params import KeyListAgentsParams as KeyListAgentsParams -from .key_list_agents_response import KeyListAgentsResponse as KeyListAgentsResponse diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_params.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_params.py deleted file mode 100644 index 389f167c..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_params.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyCreateParams"] - - -class KeyCreateParams(TypedDict, total=False): - api_key: str - - name: str diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_response.py deleted file mode 100644 index a032810c..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo - -__all__ = ["KeyCreateResponse"] - - -class KeyCreateResponse(BaseModel): - api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_delete_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_delete_response.py deleted file mode 100644 index 2afe2dda..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_delete_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo - -__all__ = ["KeyDeleteResponse"] - - -class KeyDeleteResponse(BaseModel): - api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_params.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_params.py deleted file mode 100644 index ebbc3b7e..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyListAgentsParams"] - - -class KeyListAgentsParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_response.py deleted file mode 100644 index ba6ca946..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_agents_response.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Optional - -from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks - -__all__ = ["KeyListAgentsResponse"] - - -class KeyListAgentsResponse(BaseModel): - agents: Optional[List["APIAgent"]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None - - -from ...api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_params.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_params.py deleted file mode 100644 index a11458ad..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyListParams"] - - -class KeyListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_response.py deleted file mode 100644 index d0b84e96..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks -from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo - -__all__ = ["KeyListResponse"] - - -class KeyListResponse(BaseModel): - api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_retrieve_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_retrieve_response.py deleted file mode 100644 index b8361fc2..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_retrieve_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ...._models import BaseModel -from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo - -__all__ = ["KeyRetrieveResponse"] - - -class KeyRetrieveResponse(BaseModel): - api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_params.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_params.py deleted file mode 100644 index c07d7f66..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ...._utils import PropertyInfo - -__all__ = ["KeyUpdateParams"] - - -class KeyUpdateParams(TypedDict, total=False): - api_key: str - - body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] - - name: str diff --git a/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_response.py b/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_response.py deleted file mode 100644 index b04277a6..00000000 --- a/src/digitalocean_genai_sdk/types/providers/anthropic/key_update_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ...._models import BaseModel -from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo - -__all__ = ["KeyUpdateResponse"] - - -class KeyUpdateResponse(BaseModel): - api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/providers/openai/__init__.py b/src/digitalocean_genai_sdk/types/providers/openai/__init__.py deleted file mode 100644 index 70abf332..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .key_list_params import KeyListParams as KeyListParams -from .key_create_params import KeyCreateParams as KeyCreateParams -from .key_list_response import KeyListResponse as KeyListResponse -from .key_update_params import KeyUpdateParams as KeyUpdateParams -from .key_create_response import KeyCreateResponse as KeyCreateResponse -from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse -from .key_update_response import KeyUpdateResponse as KeyUpdateResponse -from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse -from .key_retrieve_agents_params import KeyRetrieveAgentsParams as KeyRetrieveAgentsParams -from .key_retrieve_agents_response import KeyRetrieveAgentsResponse as KeyRetrieveAgentsResponse diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_create_params.py b/src/digitalocean_genai_sdk/types/providers/openai/key_create_params.py deleted file mode 100644 index 389f167c..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_create_params.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyCreateParams"] - - -class KeyCreateParams(TypedDict, total=False): - api_key: str - - name: str diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_create_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_create_response.py deleted file mode 100644 index f3b4d36c..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo - -__all__ = ["KeyCreateResponse"] - - -class KeyCreateResponse(BaseModel): - api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_delete_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_delete_response.py deleted file mode 100644 index 0c8922bb..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_delete_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo - -__all__ = ["KeyDeleteResponse"] - - -class KeyDeleteResponse(BaseModel): - api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_list_params.py b/src/digitalocean_genai_sdk/types/providers/openai/key_list_params.py deleted file mode 100644 index a11458ad..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyListParams"] - - -class KeyListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_list_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_list_response.py deleted file mode 100644 index c263cba3..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks -from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo - -__all__ = ["KeyListResponse"] - - -class KeyListResponse(BaseModel): - api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_params.py b/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_params.py deleted file mode 100644 index ec745d14..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyRetrieveAgentsParams"] - - -class KeyRetrieveAgentsParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_response.py deleted file mode 100644 index f42edea6..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_agents_response.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Optional - -from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks - -__all__ = ["KeyRetrieveAgentsResponse"] - - -class KeyRetrieveAgentsResponse(BaseModel): - agents: Optional[List["APIAgent"]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None - - -from ...api_agent import APIAgent diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_response.py deleted file mode 100644 index 7015b6f7..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_retrieve_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ...._models import BaseModel -from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo - -__all__ = ["KeyRetrieveResponse"] - - -class KeyRetrieveResponse(BaseModel): - api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_update_params.py b/src/digitalocean_genai_sdk/types/providers/openai/key_update_params.py deleted file mode 100644 index c07d7f66..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_update_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ...._utils import PropertyInfo - -__all__ = ["KeyUpdateParams"] - - -class KeyUpdateParams(TypedDict, total=False): - api_key: str - - body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] - - name: str diff --git a/src/digitalocean_genai_sdk/types/providers/openai/key_update_response.py b/src/digitalocean_genai_sdk/types/providers/openai/key_update_response.py deleted file mode 100644 index 4889f994..00000000 --- a/src/digitalocean_genai_sdk/types/providers/openai/key_update_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel -from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo - -__all__ = ["KeyUpdateResponse"] - - -class KeyUpdateResponse(BaseModel): - api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/digitalocean_genai_sdk/types/region_list_params.py b/src/digitalocean_genai_sdk/types/region_list_params.py deleted file mode 100644 index 1db0ad50..00000000 --- a/src/digitalocean_genai_sdk/types/region_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["RegionListParams"] - - -class RegionListParams(TypedDict, total=False): - serves_batch: bool - """include datacenters that are capable of running batch jobs.""" - - serves_inference: bool - """include datacenters that serve inference.""" diff --git a/src/digitalocean_genai_sdk/types/region_list_response.py b/src/digitalocean_genai_sdk/types/region_list_response.py deleted file mode 100644 index 0f955b36..00000000 --- a/src/digitalocean_genai_sdk/types/region_list_response.py +++ /dev/null @@ -1,23 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from .._models import BaseModel - -__all__ = ["RegionListResponse", "Region"] - - -class Region(BaseModel): - inference_url: Optional[str] = None - - region: Optional[str] = None - - serves_batch: Optional[bool] = None - - serves_inference: Optional[bool] = None - - stream_inference_url: Optional[str] = None - - -class RegionListResponse(BaseModel): - regions: Optional[List[Region]] = None diff --git a/src/digitalocean_genai_sdk/__init__.py b/src/gradientai/__init__.py similarity index 87% rename from src/digitalocean_genai_sdk/__init__.py rename to src/gradientai/__init__.py index fc240d83..e0f0a60b 100644 --- a/src/digitalocean_genai_sdk/__init__.py +++ b/src/gradientai/__init__.py @@ -10,11 +10,11 @@ Stream, Timeout, Transport, + GradientAI, AsyncClient, AsyncStream, RequestOptions, - DigitaloceanGenaiSDK, - AsyncDigitaloceanGenaiSDK, + AsyncGradientAI, ) from ._models import BaseModel from ._version import __title__, __version__ @@ -28,12 +28,12 @@ RateLimitError, APITimeoutError, BadRequestError, + GradientAIError, APIConnectionError, AuthenticationError, InternalServerError, PermissionDeniedError, UnprocessableEntityError, - DigitaloceanGenaiSDKError, APIResponseValidationError, ) from ._base_client import 
DefaultHttpxClient, DefaultAsyncHttpxClient @@ -49,7 +49,7 @@ "NotGiven", "NOT_GIVEN", "Omit", - "DigitaloceanGenaiSDKError", + "GradientAIError", "APIError", "APIStatusError", "APITimeoutError", @@ -69,8 +69,8 @@ "AsyncClient", "Stream", "AsyncStream", - "DigitaloceanGenaiSDK", - "AsyncDigitaloceanGenaiSDK", + "GradientAI", + "AsyncGradientAI", "file_from_path", "BaseModel", "DEFAULT_TIMEOUT", @@ -88,12 +88,12 @@ # Update the __module__ attribute for exported symbols so that # error messages point to this module instead of the module # it was originally defined in, e.g. -# digitalocean_genai_sdk._exceptions.NotFoundError -> digitalocean_genai_sdk.NotFoundError +# gradientai._exceptions.NotFoundError -> gradientai.NotFoundError __locals = locals() for __name in __all__: if not __name.startswith("__"): try: - __locals[__name].__module__ = "digitalocean_genai_sdk" + __locals[__name].__module__ = "gradientai" except (TypeError, AttributeError): # Some of our exported symbols are builtins which we can't set attributes for. pass diff --git a/src/digitalocean_genai_sdk/_base_client.py b/src/gradientai/_base_client.py similarity index 99% rename from src/digitalocean_genai_sdk/_base_client.py rename to src/gradientai/_base_client.py index 6fd247cc..aa3b35f1 100644 --- a/src/digitalocean_genai_sdk/_base_client.py +++ b/src/gradientai/_base_client.py @@ -389,7 +389,7 @@ def __init__( if max_retries is None: # pyright: ignore[reportUnnecessaryComparison] raise TypeError( - "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `digitalocean_genai_sdk.DEFAULT_MAX_RETRIES`" + "max_retries cannot be None. 
If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `gradientai.DEFAULT_MAX_RETRIES`" ) def _enforce_trailing_slash(self, url: URL) -> URL: diff --git a/src/digitalocean_genai_sdk/_client.py b/src/gradientai/_client.py similarity index 63% rename from src/digitalocean_genai_sdk/_client.py rename to src/gradientai/_client.py index 2f86bb7d..b22056ad 100644 --- a/src/digitalocean_genai_sdk/_client.py +++ b/src/gradientai/_client.py @@ -23,7 +23,7 @@ from ._compat import cached_property from ._version import __version__ from ._streaming import Stream as Stream, AsyncStream as AsyncStream -from ._exceptions import APIStatusError, DigitaloceanGenaiSDKError +from ._exceptions import APIStatusError, GradientAIError from ._base_client import ( DEFAULT_MAX_RETRIES, SyncAPIClient, @@ -31,42 +31,25 @@ ) if TYPE_CHECKING: - from .resources import ( - auth, - chat, - agents, - models, - regions, - api_keys, - providers, - embeddings, - indexing_jobs, - knowledge_bases, - ) + from .resources import chat, agents, models, embeddings from .resources.chat import ChatResource, AsyncChatResource from .resources.models import ModelsResource, AsyncModelsResource - from .resources.regions import RegionsResource, AsyncRegionsResource - from .resources.auth.auth import AuthResource, AsyncAuthResource from .resources.embeddings import EmbeddingsResource, AsyncEmbeddingsResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource - from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource - from .resources.api_keys.api_keys import APIKeysResource, AsyncAPIKeysResource - from .resources.providers.providers import ProvidersResource, AsyncProvidersResource - from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource __all__ = [ "Timeout", "Transport", "ProxiesTypes", "RequestOptions", - 
"DigitaloceanGenaiSDK", - "AsyncDigitaloceanGenaiSDK", + "GradientAI", + "AsyncGradientAI", "Client", "AsyncClient", ] -class DigitaloceanGenaiSDK(SyncAPIClient): +class GradientAI(SyncAPIClient): # client options api_key: str @@ -93,20 +76,20 @@ def __init__( # part of our public interface in the future. _strict_response_validation: bool = False, ) -> None: - """Construct a new synchronous DigitaloceanGenaiSDK client instance. + """Construct a new synchronous GradientAI client instance. This automatically infers the `api_key` argument from the `DIGITALOCEAN_GENAI_SDK_API_KEY` environment variable if it is not provided. """ if api_key is None: api_key = os.environ.get("DIGITALOCEAN_GENAI_SDK_API_KEY") if api_key is None: - raise DigitaloceanGenaiSDKError( + raise GradientAIError( "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GENAI_SDK_API_KEY environment variable" ) self.api_key = api_key if base_url is None: - base_url = os.environ.get("DIGITALOCEAN_GENAI_SDK_BASE_URL") + base_url = os.environ.get("GRADIENT_AI_BASE_URL") if base_url is None: base_url = f"https://api.digitalocean.com/" @@ -127,42 +110,6 @@ def agents(self) -> AgentsResource: return AgentsResource(self) - @cached_property - def providers(self) -> ProvidersResource: - from .resources.providers import ProvidersResource - - return ProvidersResource(self) - - @cached_property - def auth(self) -> AuthResource: - from .resources.auth import AuthResource - - return AuthResource(self) - - @cached_property - def regions(self) -> RegionsResource: - from .resources.regions import RegionsResource - - return RegionsResource(self) - - @cached_property - def indexing_jobs(self) -> IndexingJobsResource: - from .resources.indexing_jobs import IndexingJobsResource - - return IndexingJobsResource(self) - - @cached_property - def knowledge_bases(self) -> KnowledgeBasesResource: - from .resources.knowledge_bases import KnowledgeBasesResource - - return 
KnowledgeBasesResource(self) - - @cached_property - def api_keys(self) -> APIKeysResource: - from .resources.api_keys import APIKeysResource - - return APIKeysResource(self) - @cached_property def chat(self) -> ChatResource: from .resources.chat import ChatResource @@ -182,12 +129,12 @@ def models(self) -> ModelsResource: return ModelsResource(self) @cached_property - def with_raw_response(self) -> DigitaloceanGenaiSDKWithRawResponse: - return DigitaloceanGenaiSDKWithRawResponse(self) + def with_raw_response(self) -> GradientAIWithRawResponse: + return GradientAIWithRawResponse(self) @cached_property - def with_streaming_response(self) -> DigitaloceanGenaiSDKWithStreamedResponse: - return DigitaloceanGenaiSDKWithStreamedResponse(self) + def with_streaming_response(self) -> GradientAIWithStreamedResponse: + return GradientAIWithStreamedResponse(self) @property @override @@ -294,7 +241,7 @@ def _make_status_error( return APIStatusError(err_msg, response=response, body=body) -class AsyncDigitaloceanGenaiSDK(AsyncAPIClient): +class AsyncGradientAI(AsyncAPIClient): # client options api_key: str @@ -321,20 +268,20 @@ def __init__( # part of our public interface in the future. _strict_response_validation: bool = False, ) -> None: - """Construct a new async AsyncDigitaloceanGenaiSDK client instance. + """Construct a new async AsyncGradientAI client instance. This automatically infers the `api_key` argument from the `DIGITALOCEAN_GENAI_SDK_API_KEY` environment variable if it is not provided. 
""" if api_key is None: api_key = os.environ.get("DIGITALOCEAN_GENAI_SDK_API_KEY") if api_key is None: - raise DigitaloceanGenaiSDKError( + raise GradientAIError( "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GENAI_SDK_API_KEY environment variable" ) self.api_key = api_key if base_url is None: - base_url = os.environ.get("DIGITALOCEAN_GENAI_SDK_BASE_URL") + base_url = os.environ.get("GRADIENT_AI_BASE_URL") if base_url is None: base_url = f"https://api.digitalocean.com/" @@ -355,42 +302,6 @@ def agents(self) -> AsyncAgentsResource: return AsyncAgentsResource(self) - @cached_property - def providers(self) -> AsyncProvidersResource: - from .resources.providers import AsyncProvidersResource - - return AsyncProvidersResource(self) - - @cached_property - def auth(self) -> AsyncAuthResource: - from .resources.auth import AsyncAuthResource - - return AsyncAuthResource(self) - - @cached_property - def regions(self) -> AsyncRegionsResource: - from .resources.regions import AsyncRegionsResource - - return AsyncRegionsResource(self) - - @cached_property - def indexing_jobs(self) -> AsyncIndexingJobsResource: - from .resources.indexing_jobs import AsyncIndexingJobsResource - - return AsyncIndexingJobsResource(self) - - @cached_property - def knowledge_bases(self) -> AsyncKnowledgeBasesResource: - from .resources.knowledge_bases import AsyncKnowledgeBasesResource - - return AsyncKnowledgeBasesResource(self) - - @cached_property - def api_keys(self) -> AsyncAPIKeysResource: - from .resources.api_keys import AsyncAPIKeysResource - - return AsyncAPIKeysResource(self) - @cached_property def chat(self) -> AsyncChatResource: from .resources.chat import AsyncChatResource @@ -410,12 +321,12 @@ def models(self) -> AsyncModelsResource: return AsyncModelsResource(self) @cached_property - def with_raw_response(self) -> AsyncDigitaloceanGenaiSDKWithRawResponse: - return AsyncDigitaloceanGenaiSDKWithRawResponse(self) + def 
with_raw_response(self) -> AsyncGradientAIWithRawResponse: + return AsyncGradientAIWithRawResponse(self) @cached_property - def with_streaming_response(self) -> AsyncDigitaloceanGenaiSDKWithStreamedResponse: - return AsyncDigitaloceanGenaiSDKWithStreamedResponse(self) + def with_streaming_response(self) -> AsyncGradientAIWithStreamedResponse: + return AsyncGradientAIWithStreamedResponse(self) @property @override @@ -522,10 +433,10 @@ def _make_status_error( return APIStatusError(err_msg, response=response, body=body) -class DigitaloceanGenaiSDKWithRawResponse: - _client: DigitaloceanGenaiSDK +class GradientAIWithRawResponse: + _client: GradientAI - def __init__(self, client: DigitaloceanGenaiSDK) -> None: + def __init__(self, client: GradientAI) -> None: self._client = client @cached_property @@ -534,42 +445,6 @@ def agents(self) -> agents.AgentsResourceWithRawResponse: return AgentsResourceWithRawResponse(self._client.agents) - @cached_property - def providers(self) -> providers.ProvidersResourceWithRawResponse: - from .resources.providers import ProvidersResourceWithRawResponse - - return ProvidersResourceWithRawResponse(self._client.providers) - - @cached_property - def auth(self) -> auth.AuthResourceWithRawResponse: - from .resources.auth import AuthResourceWithRawResponse - - return AuthResourceWithRawResponse(self._client.auth) - - @cached_property - def regions(self) -> regions.RegionsResourceWithRawResponse: - from .resources.regions import RegionsResourceWithRawResponse - - return RegionsResourceWithRawResponse(self._client.regions) - - @cached_property - def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithRawResponse: - from .resources.indexing_jobs import IndexingJobsResourceWithRawResponse - - return IndexingJobsResourceWithRawResponse(self._client.indexing_jobs) - - @cached_property - def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawResponse: - from .resources.knowledge_bases import 
KnowledgeBasesResourceWithRawResponse - - return KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) - - @cached_property - def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: - from .resources.api_keys import APIKeysResourceWithRawResponse - - return APIKeysResourceWithRawResponse(self._client.api_keys) - @cached_property def chat(self) -> chat.ChatResourceWithRawResponse: from .resources.chat import ChatResourceWithRawResponse @@ -589,10 +464,10 @@ def models(self) -> models.ModelsResourceWithRawResponse: return ModelsResourceWithRawResponse(self._client.models) -class AsyncDigitaloceanGenaiSDKWithRawResponse: - _client: AsyncDigitaloceanGenaiSDK +class AsyncGradientAIWithRawResponse: + _client: AsyncGradientAI - def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: + def __init__(self, client: AsyncGradientAI) -> None: self._client = client @cached_property @@ -601,42 +476,6 @@ def agents(self) -> agents.AsyncAgentsResourceWithRawResponse: return AsyncAgentsResourceWithRawResponse(self._client.agents) - @cached_property - def providers(self) -> providers.AsyncProvidersResourceWithRawResponse: - from .resources.providers import AsyncProvidersResourceWithRawResponse - - return AsyncProvidersResourceWithRawResponse(self._client.providers) - - @cached_property - def auth(self) -> auth.AsyncAuthResourceWithRawResponse: - from .resources.auth import AsyncAuthResourceWithRawResponse - - return AsyncAuthResourceWithRawResponse(self._client.auth) - - @cached_property - def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: - from .resources.regions import AsyncRegionsResourceWithRawResponse - - return AsyncRegionsResourceWithRawResponse(self._client.regions) - - @cached_property - def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithRawResponse: - from .resources.indexing_jobs import AsyncIndexingJobsResourceWithRawResponse - - return AsyncIndexingJobsResourceWithRawResponse(self._client.indexing_jobs) - - 
@cached_property - def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawResponse: - from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithRawResponse - - return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) - - @cached_property - def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse: - from .resources.api_keys import AsyncAPIKeysResourceWithRawResponse - - return AsyncAPIKeysResourceWithRawResponse(self._client.api_keys) - @cached_property def chat(self) -> chat.AsyncChatResourceWithRawResponse: from .resources.chat import AsyncChatResourceWithRawResponse @@ -656,10 +495,10 @@ def models(self) -> models.AsyncModelsResourceWithRawResponse: return AsyncModelsResourceWithRawResponse(self._client.models) -class DigitaloceanGenaiSDKWithStreamedResponse: - _client: DigitaloceanGenaiSDK +class GradientAIWithStreamedResponse: + _client: GradientAI - def __init__(self, client: DigitaloceanGenaiSDK) -> None: + def __init__(self, client: GradientAI) -> None: self._client = client @cached_property @@ -668,42 +507,6 @@ def agents(self) -> agents.AgentsResourceWithStreamingResponse: return AgentsResourceWithStreamingResponse(self._client.agents) - @cached_property - def providers(self) -> providers.ProvidersResourceWithStreamingResponse: - from .resources.providers import ProvidersResourceWithStreamingResponse - - return ProvidersResourceWithStreamingResponse(self._client.providers) - - @cached_property - def auth(self) -> auth.AuthResourceWithStreamingResponse: - from .resources.auth import AuthResourceWithStreamingResponse - - return AuthResourceWithStreamingResponse(self._client.auth) - - @cached_property - def regions(self) -> regions.RegionsResourceWithStreamingResponse: - from .resources.regions import RegionsResourceWithStreamingResponse - - return RegionsResourceWithStreamingResponse(self._client.regions) - - @cached_property - def indexing_jobs(self) -> 
indexing_jobs.IndexingJobsResourceWithStreamingResponse: - from .resources.indexing_jobs import IndexingJobsResourceWithStreamingResponse - - return IndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) - - @cached_property - def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreamingResponse: - from .resources.knowledge_bases import KnowledgeBasesResourceWithStreamingResponse - - return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) - - @cached_property - def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: - from .resources.api_keys import APIKeysResourceWithStreamingResponse - - return APIKeysResourceWithStreamingResponse(self._client.api_keys) - @cached_property def chat(self) -> chat.ChatResourceWithStreamingResponse: from .resources.chat import ChatResourceWithStreamingResponse @@ -723,10 +526,10 @@ def models(self) -> models.ModelsResourceWithStreamingResponse: return ModelsResourceWithStreamingResponse(self._client.models) -class AsyncDigitaloceanGenaiSDKWithStreamedResponse: - _client: AsyncDigitaloceanGenaiSDK +class AsyncGradientAIWithStreamedResponse: + _client: AsyncGradientAI - def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: + def __init__(self, client: AsyncGradientAI) -> None: self._client = client @cached_property @@ -735,42 +538,6 @@ def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse: return AsyncAgentsResourceWithStreamingResponse(self._client.agents) - @cached_property - def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse: - from .resources.providers import AsyncProvidersResourceWithStreamingResponse - - return AsyncProvidersResourceWithStreamingResponse(self._client.providers) - - @cached_property - def auth(self) -> auth.AsyncAuthResourceWithStreamingResponse: - from .resources.auth import AsyncAuthResourceWithStreamingResponse - - return AsyncAuthResourceWithStreamingResponse(self._client.auth) - - 
@cached_property - def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: - from .resources.regions import AsyncRegionsResourceWithStreamingResponse - - return AsyncRegionsResourceWithStreamingResponse(self._client.regions) - - @cached_property - def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithStreamingResponse: - from .resources.indexing_jobs import AsyncIndexingJobsResourceWithStreamingResponse - - return AsyncIndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) - - @cached_property - def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStreamingResponse: - from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithStreamingResponse - - return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) - - @cached_property - def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: - from .resources.api_keys import AsyncAPIKeysResourceWithStreamingResponse - - return AsyncAPIKeysResourceWithStreamingResponse(self._client.api_keys) - @cached_property def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: from .resources.chat import AsyncChatResourceWithStreamingResponse @@ -790,6 +557,6 @@ def models(self) -> models.AsyncModelsResourceWithStreamingResponse: return AsyncModelsResourceWithStreamingResponse(self._client.models) -Client = DigitaloceanGenaiSDK +Client = GradientAI -AsyncClient = AsyncDigitaloceanGenaiSDK +AsyncClient = AsyncGradientAI diff --git a/src/digitalocean_genai_sdk/_compat.py b/src/gradientai/_compat.py similarity index 100% rename from src/digitalocean_genai_sdk/_compat.py rename to src/gradientai/_compat.py diff --git a/src/digitalocean_genai_sdk/_constants.py b/src/gradientai/_constants.py similarity index 100% rename from src/digitalocean_genai_sdk/_constants.py rename to src/gradientai/_constants.py diff --git a/src/digitalocean_genai_sdk/_exceptions.py b/src/gradientai/_exceptions.py similarity index 97% 
rename from src/digitalocean_genai_sdk/_exceptions.py rename to src/gradientai/_exceptions.py index 755e166e..759c8d86 100644 --- a/src/digitalocean_genai_sdk/_exceptions.py +++ b/src/gradientai/_exceptions.py @@ -18,11 +18,11 @@ ] -class DigitaloceanGenaiSDKError(Exception): +class GradientAIError(Exception): pass -class APIError(DigitaloceanGenaiSDKError): +class APIError(GradientAIError): message: str request: httpx.Request diff --git a/src/digitalocean_genai_sdk/_files.py b/src/gradientai/_files.py similarity index 100% rename from src/digitalocean_genai_sdk/_files.py rename to src/gradientai/_files.py diff --git a/src/digitalocean_genai_sdk/_models.py b/src/gradientai/_models.py similarity index 100% rename from src/digitalocean_genai_sdk/_models.py rename to src/gradientai/_models.py diff --git a/src/digitalocean_genai_sdk/_qs.py b/src/gradientai/_qs.py similarity index 100% rename from src/digitalocean_genai_sdk/_qs.py rename to src/gradientai/_qs.py diff --git a/src/digitalocean_genai_sdk/_resource.py b/src/gradientai/_resource.py similarity index 76% rename from src/digitalocean_genai_sdk/_resource.py rename to src/gradientai/_resource.py index fe43ec28..9182ee0b 100644 --- a/src/digitalocean_genai_sdk/_resource.py +++ b/src/gradientai/_resource.py @@ -8,13 +8,13 @@ import anyio if TYPE_CHECKING: - from ._client import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK + from ._client import GradientAI, AsyncGradientAI class SyncAPIResource: - _client: DigitaloceanGenaiSDK + _client: GradientAI - def __init__(self, client: DigitaloceanGenaiSDK) -> None: + def __init__(self, client: GradientAI) -> None: self._client = client self._get = client.get self._post = client.post @@ -28,9 +28,9 @@ def _sleep(self, seconds: float) -> None: class AsyncAPIResource: - _client: AsyncDigitaloceanGenaiSDK + _client: AsyncGradientAI - def __init__(self, client: AsyncDigitaloceanGenaiSDK) -> None: + def __init__(self, client: AsyncGradientAI) -> None: self._client = client 
self._get = client.get self._post = client.post diff --git a/src/digitalocean_genai_sdk/_response.py b/src/gradientai/_response.py similarity index 98% rename from src/digitalocean_genai_sdk/_response.py rename to src/gradientai/_response.py index 7f1fff1d..2037e4ca 100644 --- a/src/digitalocean_genai_sdk/_response.py +++ b/src/gradientai/_response.py @@ -29,7 +29,7 @@ from ._models import BaseModel, is_basemodel from ._constants import RAW_RESPONSE_HEADER, OVERRIDE_CAST_TO_HEADER from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type -from ._exceptions import DigitaloceanGenaiSDKError, APIResponseValidationError +from ._exceptions import GradientAIError, APIResponseValidationError if TYPE_CHECKING: from ._models import FinalRequestOptions @@ -218,7 +218,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: and issubclass(origin, pydantic.BaseModel) ): raise TypeError( - "Pydantic models must subclass our base model type, e.g. `from digitalocean_genai_sdk import BaseModel`" + "Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`" ) if ( @@ -285,7 +285,7 @@ def parse(self, *, to: type[_T] | None = None) -> R | _T: the `to` argument, e.g. ```py - from digitalocean_genai_sdk import BaseModel + from gradientai import BaseModel class MyModel(BaseModel): @@ -387,7 +387,7 @@ async def parse(self, *, to: type[_T] | None = None) -> R | _T: the `to` argument, e.g. ```py - from digitalocean_genai_sdk import BaseModel + from gradientai import BaseModel class MyModel(BaseModel): @@ -558,11 +558,11 @@ async def stream_to_file( class MissingStreamClassError(TypeError): def __init__(self) -> None: super().__init__( - "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `digitalocean_genai_sdk._streaming` for reference", + "The `stream` argument was set to `True` but the `stream_cls` argument was not given. 
See `gradientai._streaming` for reference", ) -class StreamAlreadyConsumed(DigitaloceanGenaiSDKError): +class StreamAlreadyConsumed(GradientAIError): """ Attempted to read or stream content, but the content has already been streamed. diff --git a/src/digitalocean_genai_sdk/_streaming.py b/src/gradientai/_streaming.py similarity index 98% rename from src/digitalocean_genai_sdk/_streaming.py rename to src/gradientai/_streaming.py index 96c3f3d3..bab5eb80 100644 --- a/src/digitalocean_genai_sdk/_streaming.py +++ b/src/gradientai/_streaming.py @@ -12,7 +12,7 @@ from ._utils import extract_type_var_from_base if TYPE_CHECKING: - from ._client import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK + from ._client import GradientAI, AsyncGradientAI _T = TypeVar("_T") @@ -30,7 +30,7 @@ def __init__( *, cast_to: type[_T], response: httpx.Response, - client: DigitaloceanGenaiSDK, + client: GradientAI, ) -> None: self.response = response self._cast_to = cast_to @@ -93,7 +93,7 @@ def __init__( *, cast_to: type[_T], response: httpx.Response, - client: AsyncDigitaloceanGenaiSDK, + client: AsyncGradientAI, ) -> None: self.response = response self._cast_to = cast_to diff --git a/src/digitalocean_genai_sdk/_types.py b/src/gradientai/_types.py similarity index 99% rename from src/digitalocean_genai_sdk/_types.py rename to src/gradientai/_types.py index 3c0d156e..1bac876d 100644 --- a/src/digitalocean_genai_sdk/_types.py +++ b/src/gradientai/_types.py @@ -81,7 +81,7 @@ # This unfortunately means that you will either have # to import this type and pass it explicitly: # -# from digitalocean_genai_sdk import NoneType +# from gradientai import NoneType # client.get('/foo', cast_to=NoneType) # # or build it yourself: diff --git a/src/digitalocean_genai_sdk/_utils/__init__.py b/src/gradientai/_utils/__init__.py similarity index 100% rename from src/digitalocean_genai_sdk/_utils/__init__.py rename to src/gradientai/_utils/__init__.py diff --git a/src/digitalocean_genai_sdk/_utils/_logs.py 
b/src/gradientai/_utils/_logs.py similarity index 67% rename from src/digitalocean_genai_sdk/_utils/_logs.py rename to src/gradientai/_utils/_logs.py index e0c1fee5..9047e5c8 100644 --- a/src/digitalocean_genai_sdk/_utils/_logs.py +++ b/src/gradientai/_utils/_logs.py @@ -1,12 +1,12 @@ import os import logging -logger: logging.Logger = logging.getLogger("digitalocean_genai_sdk") +logger: logging.Logger = logging.getLogger("gradientai") httpx_logger: logging.Logger = logging.getLogger("httpx") def _basic_config() -> None: - # e.g. [2023-10-05 14:12:26 - digitalocean_genai_sdk._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" + # e.g. [2023-10-05 14:12:26 - gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" logging.basicConfig( format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", @@ -14,7 +14,7 @@ def _basic_config() -> None: def setup_logging() -> None: - env = os.environ.get("DIGITALOCEAN_GENAI_SDK_LOG") + env = os.environ.get("GRADIENT_AI_LOG") if env == "debug": _basic_config() logger.setLevel(logging.DEBUG) diff --git a/src/digitalocean_genai_sdk/_utils/_proxy.py b/src/gradientai/_utils/_proxy.py similarity index 100% rename from src/digitalocean_genai_sdk/_utils/_proxy.py rename to src/gradientai/_utils/_proxy.py diff --git a/src/digitalocean_genai_sdk/_utils/_reflection.py b/src/gradientai/_utils/_reflection.py similarity index 100% rename from src/digitalocean_genai_sdk/_utils/_reflection.py rename to src/gradientai/_utils/_reflection.py diff --git a/src/gradientai/_utils/_resources_proxy.py b/src/gradientai/_utils/_resources_proxy.py new file mode 100644 index 00000000..b3bc4931 --- /dev/null +++ b/src/gradientai/_utils/_resources_proxy.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +from typing import Any +from typing_extensions import override + +from ._proxy import LazyProxy + + +class ResourcesProxy(LazyProxy[Any]): + 
"""A proxy for the `gradientai.resources` module. + + This is used so that we can lazily import `gradientai.resources` only when + needed *and* so that users can just import `gradientai` and reference `gradientai.resources` + """ + + @override + def __load__(self) -> Any: + import importlib + + mod = importlib.import_module("gradientai.resources") + return mod + + +resources = ResourcesProxy().__as_proxied__() diff --git a/src/digitalocean_genai_sdk/_utils/_streams.py b/src/gradientai/_utils/_streams.py similarity index 100% rename from src/digitalocean_genai_sdk/_utils/_streams.py rename to src/gradientai/_utils/_streams.py diff --git a/src/digitalocean_genai_sdk/_utils/_sync.py b/src/gradientai/_utils/_sync.py similarity index 100% rename from src/digitalocean_genai_sdk/_utils/_sync.py rename to src/gradientai/_utils/_sync.py diff --git a/src/digitalocean_genai_sdk/_utils/_transform.py b/src/gradientai/_utils/_transform.py similarity index 100% rename from src/digitalocean_genai_sdk/_utils/_transform.py rename to src/gradientai/_utils/_transform.py diff --git a/src/digitalocean_genai_sdk/_utils/_typing.py b/src/gradientai/_utils/_typing.py similarity index 100% rename from src/digitalocean_genai_sdk/_utils/_typing.py rename to src/gradientai/_utils/_typing.py diff --git a/src/digitalocean_genai_sdk/_utils/_utils.py b/src/gradientai/_utils/_utils.py similarity index 100% rename from src/digitalocean_genai_sdk/_utils/_utils.py rename to src/gradientai/_utils/_utils.py diff --git a/src/digitalocean_genai_sdk/_version.py b/src/gradientai/_version.py similarity index 79% rename from src/digitalocean_genai_sdk/_version.py rename to src/gradientai/_version.py index 50483bc2..2cf47e97 100644 --- a/src/digitalocean_genai_sdk/_version.py +++ b/src/gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-__title__ = "digitalocean_genai_sdk" +__title__ = "gradientai" __version__ = "0.1.0-alpha.3" # x-release-please-version diff --git a/src/gradientai/lib/.keep b/src/gradientai/lib/.keep new file mode 100644 index 00000000..5e2c99fd --- /dev/null +++ b/src/gradientai/lib/.keep @@ -0,0 +1,4 @@ +File generated from our OpenAPI spec by Stainless. + +This directory can be used to store custom files to expand the SDK. +It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. \ No newline at end of file diff --git a/src/digitalocean_genai_sdk/py.typed b/src/gradientai/py.typed similarity index 100% rename from src/digitalocean_genai_sdk/py.typed rename to src/gradientai/py.typed diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py new file mode 100644 index 00000000..386e2ed6 --- /dev/null +++ b/src/gradientai/resources/__init__.py @@ -0,0 +1,61 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .chat import ( + ChatResource, + AsyncChatResource, + ChatResourceWithRawResponse, + AsyncChatResourceWithRawResponse, + ChatResourceWithStreamingResponse, + AsyncChatResourceWithStreamingResponse, +) +from .agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, +) +from .models import ( + ModelsResource, + AsyncModelsResource, + ModelsResourceWithRawResponse, + AsyncModelsResourceWithRawResponse, + ModelsResourceWithStreamingResponse, + AsyncModelsResourceWithStreamingResponse, +) +from .embeddings import ( + EmbeddingsResource, + AsyncEmbeddingsResource, + EmbeddingsResourceWithRawResponse, + AsyncEmbeddingsResourceWithRawResponse, + EmbeddingsResourceWithStreamingResponse, + AsyncEmbeddingsResourceWithStreamingResponse, +) + +__all__ = [ + "AgentsResource", + "AsyncAgentsResource", + "AgentsResourceWithRawResponse", + "AsyncAgentsResourceWithRawResponse", + "AgentsResourceWithStreamingResponse", + "AsyncAgentsResourceWithStreamingResponse", + "ChatResource", + "AsyncChatResource", + "ChatResourceWithRawResponse", + "AsyncChatResourceWithRawResponse", + "ChatResourceWithStreamingResponse", + "AsyncChatResourceWithStreamingResponse", + "EmbeddingsResource", + "AsyncEmbeddingsResource", + "EmbeddingsResourceWithRawResponse", + "AsyncEmbeddingsResourceWithRawResponse", + "EmbeddingsResourceWithStreamingResponse", + "AsyncEmbeddingsResourceWithStreamingResponse", + "ModelsResource", + "AsyncModelsResource", + "ModelsResourceWithRawResponse", + "AsyncModelsResourceWithRawResponse", + "ModelsResourceWithStreamingResponse", + "AsyncModelsResourceWithStreamingResponse", +] diff --git a/src/digitalocean_genai_sdk/resources/auth/__init__.py b/src/gradientai/resources/agents/__init__.py similarity index 53% rename from src/digitalocean_genai_sdk/resources/auth/__init__.py rename to 
src/gradientai/resources/agents/__init__.py index 7c844a98..2ae2658b 100644 --- a/src/digitalocean_genai_sdk/resources/auth/__init__.py +++ b/src/gradientai/resources/agents/__init__.py @@ -1,13 +1,5 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from .auth import ( - AuthResource, - AsyncAuthResource, - AuthResourceWithRawResponse, - AsyncAuthResourceWithRawResponse, - AuthResourceWithStreamingResponse, - AsyncAuthResourceWithStreamingResponse, -) from .agents import ( AgentsResource, AsyncAgentsResource, @@ -16,18 +8,26 @@ AgentsResourceWithStreamingResponse, AsyncAgentsResourceWithStreamingResponse, ) +from .versions import ( + VersionsResource, + AsyncVersionsResource, + VersionsResourceWithRawResponse, + AsyncVersionsResourceWithRawResponse, + VersionsResourceWithStreamingResponse, + AsyncVersionsResourceWithStreamingResponse, +) __all__ = [ + "VersionsResource", + "AsyncVersionsResource", + "VersionsResourceWithRawResponse", + "AsyncVersionsResourceWithRawResponse", + "VersionsResourceWithStreamingResponse", + "AsyncVersionsResourceWithStreamingResponse", "AgentsResource", "AsyncAgentsResource", "AgentsResourceWithRawResponse", "AsyncAgentsResourceWithRawResponse", "AgentsResourceWithStreamingResponse", "AsyncAgentsResourceWithStreamingResponse", - "AuthResource", - "AsyncAuthResource", - "AuthResourceWithRawResponse", - "AsyncAuthResourceWithRawResponse", - "AuthResourceWithStreamingResponse", - "AsyncAuthResourceWithStreamingResponse", ] diff --git a/src/digitalocean_genai_sdk/resources/auth/agents/agents.py b/src/gradientai/resources/agents/agents.py similarity index 64% rename from src/digitalocean_genai_sdk/resources/auth/agents/agents.py rename to src/gradientai/resources/agents/agents.py index a0aa9faf..9896d179 100644 --- a/src/digitalocean_genai_sdk/resources/auth/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -2,24 +2,24 @@ from __future__ import annotations -from .token import ( - 
TokenResource, - AsyncTokenResource, - TokenResourceWithRawResponse, - AsyncTokenResourceWithRawResponse, - TokenResourceWithStreamingResponse, - AsyncTokenResourceWithStreamingResponse, +from .versions import ( + VersionsResource, + AsyncVersionsResource, + VersionsResourceWithRawResponse, + AsyncVersionsResourceWithRawResponse, + VersionsResourceWithStreamingResponse, + AsyncVersionsResourceWithStreamingResponse, ) -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource __all__ = ["AgentsResource", "AsyncAgentsResource"] class AgentsResource(SyncAPIResource): @cached_property - def token(self) -> TokenResource: - return TokenResource(self._client) + def versions(self) -> VersionsResource: + return VersionsResource(self._client) @cached_property def with_raw_response(self) -> AgentsResourceWithRawResponse: @@ -27,7 +27,7 @@ def with_raw_response(self) -> AgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AgentsResourceWithRawResponse(self) @@ -36,15 +36,15 @@ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AgentsResourceWithStreamingResponse(self) class AsyncAgentsResource(AsyncAPIResource): @cached_property - def token(self) -> AsyncTokenResource: - return AsyncTokenResource(self._client) + def versions(self) -> AsyncVersionsResource: + return AsyncVersionsResource(self._client) @cached_property def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncAgentsResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncAgentsResourceWithStreamingResponse(self) @@ -71,8 +71,8 @@ def __init__(self, agents: AgentsResource) -> None: self._agents = agents @cached_property - def token(self) -> TokenResourceWithRawResponse: - return TokenResourceWithRawResponse(self._agents.token) + def versions(self) -> VersionsResourceWithRawResponse: + return VersionsResourceWithRawResponse(self._agents.versions) class AsyncAgentsResourceWithRawResponse: @@ -80,8 +80,8 @@ def __init__(self, agents: AsyncAgentsResource) -> None: self._agents = agents @cached_property - def token(self) -> AsyncTokenResourceWithRawResponse: - return AsyncTokenResourceWithRawResponse(self._agents.token) + def versions(self) -> AsyncVersionsResourceWithRawResponse: + return AsyncVersionsResourceWithRawResponse(self._agents.versions) class AgentsResourceWithStreamingResponse: @@ -89,8 +89,8 @@ def __init__(self, agents: AgentsResource) -> None: self._agents = agents @cached_property - def token(self) -> TokenResourceWithStreamingResponse: - return TokenResourceWithStreamingResponse(self._agents.token) + def versions(self) -> VersionsResourceWithStreamingResponse: + return VersionsResourceWithStreamingResponse(self._agents.versions) class AsyncAgentsResourceWithStreamingResponse: @@ -98,5 +98,5 @@ def __init__(self, agents: AsyncAgentsResource) -> None: self._agents = agents @cached_property - def token(self) -> AsyncTokenResourceWithStreamingResponse: - return AsyncTokenResourceWithStreamingResponse(self._agents.token) + def versions(self) -> AsyncVersionsResourceWithStreamingResponse: + return AsyncVersionsResourceWithStreamingResponse(self._agents.versions) diff --git a/src/digitalocean_genai_sdk/resources/agents/versions.py b/src/gradientai/resources/agents/versions.py similarity index 97% rename from 
src/digitalocean_genai_sdk/resources/agents/versions.py rename to src/gradientai/resources/agents/versions.py index e77a252b..d71da8df 100644 --- a/src/digitalocean_genai_sdk/resources/agents/versions.py +++ b/src/gradientai/resources/agents/versions.py @@ -29,7 +29,7 @@ def with_raw_response(self) -> VersionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return VersionsResourceWithRawResponse(self) @@ -38,7 +38,7 @@ def with_streaming_response(self) -> VersionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return VersionsResourceWithStreamingResponse(self) @@ -143,7 +143,7 @@ def with_raw_response(self) -> AsyncVersionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncVersionsResourceWithRawResponse(self) @@ -152,7 +152,7 @@ def with_streaming_response(self) -> AsyncVersionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncVersionsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/chat.py b/src/gradientai/resources/chat.py similarity index 98% rename from src/digitalocean_genai_sdk/resources/chat.py rename to src/gradientai/resources/chat.py index 518fbad8..223e7cf3 100644 --- a/src/digitalocean_genai_sdk/resources/chat.py +++ b/src/gradientai/resources/chat.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> ChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return ChatResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> ChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return ChatResourceWithStreamingResponse(self) @@ -191,7 +191,7 @@ def with_raw_response(self) -> AsyncChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncChatResourceWithRawResponse(self) @@ -200,7 +200,7 @@ def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncChatResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/embeddings.py b/src/gradientai/resources/embeddings.py similarity index 97% rename from src/digitalocean_genai_sdk/resources/embeddings.py rename to src/gradientai/resources/embeddings.py index 1bcd3145..36ffe3c6 100644 --- a/src/digitalocean_genai_sdk/resources/embeddings.py +++ b/src/gradientai/resources/embeddings.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> EmbeddingsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return EmbeddingsResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> EmbeddingsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return EmbeddingsResourceWithStreamingResponse(self) @@ -101,7 +101,7 @@ def with_raw_response(self) -> AsyncEmbeddingsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncEmbeddingsResourceWithRawResponse(self) @@ -110,7 +110,7 @@ def with_streaming_response(self) -> AsyncEmbeddingsResourceWithStreamingRespons """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncEmbeddingsResourceWithStreamingResponse(self) diff --git a/src/digitalocean_genai_sdk/resources/models.py b/src/gradientai/resources/models.py similarity index 97% rename from src/digitalocean_genai_sdk/resources/models.py rename to src/gradientai/resources/models.py index 81b75441..c30e1135 100644 --- a/src/digitalocean_genai_sdk/resources/models.py +++ b/src/gradientai/resources/models.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> ModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return ModelsResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return ModelsResourceWithStreamingResponse(self) @@ -104,7 +104,7 @@ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncModelsResourceWithRawResponse(self) @@ -113,7 +113,7 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncModelsResourceWithStreamingResponse(self) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py new file mode 100644 index 00000000..7b80eca4 --- /dev/null +++ b/src/gradientai/types/__init__.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .model import Model as Model +from .model_list_response import ModelListResponse as ModelListResponse +from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod +from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams +from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility +from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse +from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob +from .chat_create_completion_params import ChatCreateCompletionParams as ChatCreateCompletionParams +from .chat_create_completion_response import ChatCreateCompletionResponse as ChatCreateCompletionResponse +from .chat_completion_request_message_content_part_text_param import ( + ChatCompletionRequestMessageContentPartTextParam as ChatCompletionRequestMessageContentPartTextParam, +) diff --git a/src/gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py new file mode 100644 index 00000000..fdee8834 --- /dev/null +++ b/src/gradientai/types/agents/__init__.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .api_meta import APIMeta as APIMeta +from .api_links import APILinks as APILinks +from .version_list_params import VersionListParams as VersionListParams +from .version_list_response import VersionListResponse as VersionListResponse +from .version_update_params import VersionUpdateParams as VersionUpdateParams +from .version_update_response import VersionUpdateResponse as VersionUpdateResponse diff --git a/src/digitalocean_genai_sdk/types/agents/api_links.py b/src/gradientai/types/agents/api_links.py similarity index 100% rename from src/digitalocean_genai_sdk/types/agents/api_links.py rename to src/gradientai/types/agents/api_links.py diff --git a/src/digitalocean_genai_sdk/types/agents/api_meta.py b/src/gradientai/types/agents/api_meta.py similarity index 100% rename from src/digitalocean_genai_sdk/types/agents/api_meta.py rename to src/gradientai/types/agents/api_meta.py diff --git a/src/digitalocean_genai_sdk/types/agents/version_list_params.py b/src/gradientai/types/agents/version_list_params.py similarity index 100% rename from src/digitalocean_genai_sdk/types/agents/version_list_params.py rename to src/gradientai/types/agents/version_list_params.py diff --git a/src/digitalocean_genai_sdk/types/agents/version_list_response.py b/src/gradientai/types/agents/version_list_response.py similarity index 100% rename from src/digitalocean_genai_sdk/types/agents/version_list_response.py rename to src/gradientai/types/agents/version_list_response.py diff --git a/src/digitalocean_genai_sdk/types/agents/version_update_params.py b/src/gradientai/types/agents/version_update_params.py similarity index 100% rename from src/digitalocean_genai_sdk/types/agents/version_update_params.py rename to src/gradientai/types/agents/version_update_params.py diff --git a/src/digitalocean_genai_sdk/types/agents/version_update_response.py b/src/gradientai/types/agents/version_update_response.py similarity index 100% rename from 
src/digitalocean_genai_sdk/types/agents/version_update_response.py rename to src/gradientai/types/agents/version_update_response.py diff --git a/src/digitalocean_genai_sdk/types/api_deployment_visibility.py b/src/gradientai/types/api_deployment_visibility.py similarity index 100% rename from src/digitalocean_genai_sdk/types/api_deployment_visibility.py rename to src/gradientai/types/api_deployment_visibility.py diff --git a/src/digitalocean_genai_sdk/types/auth/__init__.py b/src/gradientai/types/api_keys/__init__.py similarity index 100% rename from src/digitalocean_genai_sdk/types/auth/__init__.py rename to src/gradientai/types/api_keys/__init__.py diff --git a/src/digitalocean_genai_sdk/types/api_retrieval_method.py b/src/gradientai/types/api_retrieval_method.py similarity index 100% rename from src/digitalocean_genai_sdk/types/api_retrieval_method.py rename to src/gradientai/types/api_retrieval_method.py diff --git a/src/digitalocean_genai_sdk/types/providers/__init__.py b/src/gradientai/types/auth/__init__.py similarity index 100% rename from src/digitalocean_genai_sdk/types/providers/__init__.py rename to src/gradientai/types/auth/__init__.py diff --git a/tests/api_resources/api_keys/__init__.py b/src/gradientai/types/auth/agents/__init__.py similarity index 70% rename from tests/api_resources/api_keys/__init__.py rename to src/gradientai/types/auth/agents/__init__.py index fd8019a9..f8ee8b14 100644 --- a/tests/api_resources/api_keys/__init__.py +++ b/src/gradientai/types/auth/agents/__init__.py @@ -1 +1,3 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations diff --git a/src/digitalocean_genai_sdk/types/chat_completion_request_message_content_part_text_param.py b/src/gradientai/types/chat_completion_request_message_content_part_text_param.py similarity index 100% rename from src/digitalocean_genai_sdk/types/chat_completion_request_message_content_part_text_param.py rename to src/gradientai/types/chat_completion_request_message_content_part_text_param.py diff --git a/src/digitalocean_genai_sdk/types/chat_completion_token_logprob.py b/src/gradientai/types/chat_completion_token_logprob.py similarity index 100% rename from src/digitalocean_genai_sdk/types/chat_completion_token_logprob.py rename to src/gradientai/types/chat_completion_token_logprob.py diff --git a/src/digitalocean_genai_sdk/types/chat_create_completion_params.py b/src/gradientai/types/chat_create_completion_params.py similarity index 100% rename from src/digitalocean_genai_sdk/types/chat_create_completion_params.py rename to src/gradientai/types/chat_create_completion_params.py diff --git a/src/digitalocean_genai_sdk/types/chat_create_completion_response.py b/src/gradientai/types/chat_create_completion_response.py similarity index 100% rename from src/digitalocean_genai_sdk/types/chat_create_completion_response.py rename to src/gradientai/types/chat_create_completion_response.py diff --git a/src/digitalocean_genai_sdk/types/embedding_create_params.py b/src/gradientai/types/embedding_create_params.py similarity index 100% rename from src/digitalocean_genai_sdk/types/embedding_create_params.py rename to src/gradientai/types/embedding_create_params.py diff --git a/src/digitalocean_genai_sdk/types/embedding_create_response.py b/src/gradientai/types/embedding_create_response.py similarity index 100% rename from src/digitalocean_genai_sdk/types/embedding_create_response.py rename to src/gradientai/types/embedding_create_response.py diff --git a/tests/api_resources/auth/__init__.py 
b/src/gradientai/types/knowledge_bases/__init__.py similarity index 70% rename from tests/api_resources/auth/__init__.py rename to src/gradientai/types/knowledge_bases/__init__.py index fd8019a9..f8ee8b14 100644 --- a/tests/api_resources/auth/__init__.py +++ b/src/gradientai/types/knowledge_bases/__init__.py @@ -1 +1,3 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations diff --git a/src/digitalocean_genai_sdk/types/model.py b/src/gradientai/types/model.py similarity index 100% rename from src/digitalocean_genai_sdk/types/model.py rename to src/gradientai/types/model.py diff --git a/src/digitalocean_genai_sdk/types/model_list_response.py b/src/gradientai/types/model_list_response.py similarity index 100% rename from src/digitalocean_genai_sdk/types/model_list_response.py rename to src/gradientai/types/model_list_response.py diff --git a/tests/api_resources/auth/agents/__init__.py b/src/gradientai/types/providers/__init__.py similarity index 70% rename from tests/api_resources/auth/agents/__init__.py rename to src/gradientai/types/providers/__init__.py index fd8019a9..f8ee8b14 100644 --- a/tests/api_resources/auth/agents/__init__.py +++ b/src/gradientai/types/providers/__init__.py @@ -1 +1,3 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations diff --git a/tests/api_resources/knowledge_bases/__init__.py b/src/gradientai/types/providers/anthropic/__init__.py similarity index 70% rename from tests/api_resources/knowledge_bases/__init__.py rename to src/gradientai/types/providers/anthropic/__init__.py index fd8019a9..f8ee8b14 100644 --- a/tests/api_resources/knowledge_bases/__init__.py +++ b/src/gradientai/types/providers/anthropic/__init__.py @@ -1 +1,3 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations diff --git a/src/gradientai/types/providers/openai/__init__.py b/src/gradientai/types/providers/openai/__init__.py new file mode 100644 index 00000000..f8ee8b14 --- /dev/null +++ b/src/gradientai/types/providers/openai/__init__.py @@ -0,0 +1,3 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py deleted file mode 100644 index 911ac6f9..00000000 --- a/tests/api_resources/agents/test_api_keys.py +++ /dev/null @@ -1,572 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.agents import ( - APIKeyListResponse, - APIKeyCreateResponse, - APIKeyDeleteResponse, - APIKeyUpdateResponse, - APIKeyRegenerateResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestAPIKeys: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.agents.api_keys.create( - path_agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.agents.api_keys.create( - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - name="name", - ) - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, 
client: DigitaloceanGenaiSDK) -> None: - response = client.agents.api_keys.with_raw_response.create( - path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.api_keys.with_streaming_response.create( - path_agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.agents.api_keys.with_raw_response.create( - path_agent_uuid="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.agents.api_keys.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.agents.api_keys.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = 
client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="", - path_agent_uuid="agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.agents.api_keys.list( - agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.agents.api_keys.list( - agent_uuid="agent_uuid", - page=0, - per_page=0, - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize 
- def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.api_keys.with_raw_response.list( - agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.api_keys.with_streaming_response.list( - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.api_keys.with_raw_response.list( - agent_uuid="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.agents.api_keys.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.api_keys.with_raw_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> 
None: - with client.agents.api_keys.with_streaming_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.api_keys.with_raw_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.agents.api_keys.with_raw_response.delete( - api_key_uuid="", - agent_uuid="agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - def test_method_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.agents.api_keys.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.api_keys.with_streaming_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="", - agent_uuid="agent_uuid", - ) - - -class TestAsyncAPIKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.agents.api_keys.create( - path_agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.agents.api_keys.create( - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - name="name", - ) - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.api_keys.with_raw_response.create( - path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, 
path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.api_keys.with_streaming_response.create( - path_agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.create( - path_agent_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.agents.api_keys.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.agents.api_keys.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="", - path_agent_uuid="agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.agents.api_keys.list( - agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.agents.api_keys.list( - agent_uuid="agent_uuid", - page=0, - per_page=0, - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def 
test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.api_keys.with_raw_response.list( - agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.api_keys.with_streaming_response.list( - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.list( - agent_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.agents.api_keys.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.api_keys.with_raw_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - 
assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.api_keys.with_streaming_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.delete( - api_key_uuid="", - agent_uuid="agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.agents.api_keys.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) - assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyRegenerateResponse, 
api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.api_keys.with_streaming_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="", - agent_uuid="agent_uuid", - ) diff --git a/tests/api_resources/agents/test_child_agents.py b/tests/api_resources/agents/test_child_agents.py deleted file mode 100644 index cfc8084e..00000000 --- a/tests/api_resources/agents/test_child_agents.py +++ /dev/null @@ -1,485 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.agents import ( - ChildAgentAddResponse, - ChildAgentViewResponse, - ChildAgentDeleteResponse, - ChildAgentUpdateResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestChildAgents: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - child_agent = client.agents.child_agents.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - ) - assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - child_agent = client.agents.child_agents.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - body_child_agent_uuid="child_agent_uuid", - if_case="if_case", - body_parent_agent_uuid="parent_agent_uuid", - route_name="route_name", - uuid="uuid", - ) - assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.child_agents.with_raw_response.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = response.parse() - assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def 
test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.child_agents.with_streaming_response.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - child_agent = response.parse() - assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises( - ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" - ): - client.agents.child_agents.with_raw_response.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - client.agents.child_agents.with_raw_response.update( - path_child_agent_uuid="", - path_parent_agent_uuid="parent_agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - child_agent = client.agents.child_agents.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="parent_agent_uuid", - ) - assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.child_agents.with_raw_response.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="parent_agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = response.parse() - assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def 
test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.child_agents.with_streaming_response.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="parent_agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - child_agent = response.parse() - assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): - client.agents.child_agents.with_raw_response.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): - client.agents.child_agents.with_raw_response.delete( - child_agent_uuid="", - parent_agent_uuid="parent_agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - def test_method_add(self, client: DigitaloceanGenaiSDK) -> None: - child_agent = client.agents.child_agents.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - ) - assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_add_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - child_agent = client.agents.child_agents.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - body_child_agent_uuid="child_agent_uuid", - if_case="if_case", - body_parent_agent_uuid="parent_agent_uuid", - route_name="route_name", - ) - assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_add(self, client: DigitaloceanGenaiSDK) 
-> None: - response = client.agents.child_agents.with_raw_response.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = response.parse() - assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_add(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.child_agents.with_streaming_response.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - child_agent = response.parse() - assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_add(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises( - ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" - ): - client.agents.child_agents.with_raw_response.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - client.agents.child_agents.with_raw_response.add( - path_child_agent_uuid="", - path_parent_agent_uuid="parent_agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - def test_method_view(self, client: DigitaloceanGenaiSDK) -> None: - child_agent = client.agents.child_agents.view( - "uuid", - ) - assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_view(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.child_agents.with_raw_response.view( - "uuid", - ) - - 
assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = response.parse() - assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_view(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.child_agents.with_streaming_response.view( - "uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - child_agent = response.parse() - assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_view(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.agents.child_agents.with_raw_response.view( - "", - ) - - -class TestAsyncChildAgents: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - child_agent = await async_client.agents.child_agents.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - ) - assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - child_agent = await async_client.agents.child_agents.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - body_child_agent_uuid="child_agent_uuid", - if_case="if_case", - body_parent_agent_uuid="parent_agent_uuid", - route_name="route_name", - uuid="uuid", - ) - assert_matches_type(ChildAgentUpdateResponse, child_agent, 
path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.child_agents.with_raw_response.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = await response.parse() - assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.child_agents.with_streaming_response.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - child_agent = await response.parse() - assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises( - ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" - ): - await async_client.agents.child_agents.with_raw_response.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - await async_client.agents.child_agents.with_raw_response.update( - path_child_agent_uuid="", - path_parent_agent_uuid="parent_agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - child_agent = await 
async_client.agents.child_agents.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="parent_agent_uuid", - ) - assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.child_agents.with_raw_response.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="parent_agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = await response.parse() - assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.child_agents.with_streaming_response.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="parent_agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - child_agent = await response.parse() - assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): - await async_client.agents.child_agents.with_raw_response.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): - await async_client.agents.child_agents.with_raw_response.delete( - child_agent_uuid="", - parent_agent_uuid="parent_agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - async def 
test_method_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - child_agent = await async_client.agents.child_agents.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - ) - assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_add_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - child_agent = await async_client.agents.child_agents.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - body_child_agent_uuid="child_agent_uuid", - if_case="if_case", - body_parent_agent_uuid="parent_agent_uuid", - route_name="route_name", - ) - assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.child_agents.with_raw_response.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = await response.parse() - assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.child_agents.with_streaming_response.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - child_agent = await response.parse() - assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def 
test_path_params_add(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises( - ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" - ): - await async_client.agents.child_agents.with_raw_response.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - await async_client.agents.child_agents.with_raw_response.add( - path_child_agent_uuid="", - path_parent_agent_uuid="parent_agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_view(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - child_agent = await async_client.agents.child_agents.view( - "uuid", - ) - assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_view(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.child_agents.with_raw_response.view( - "uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - child_agent = await response.parse() - assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_view(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.child_agents.with_streaming_response.view( - "uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - child_agent = await response.parse() - assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_view(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.agents.child_agents.with_raw_response.view( - "", - ) diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py deleted file mode 100644 index d66590ba..00000000 --- a/tests/api_resources/agents/test_functions.py +++ /dev/null @@ -1,382 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.agents import ( - FunctionCreateResponse, - FunctionDeleteResponse, - FunctionUpdateResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestFunctions: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - function = client.agents.functions.create( - path_agent_uuid="agent_uuid", - ) - assert_matches_type(FunctionCreateResponse, function, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - function = client.agents.functions.create( - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - description="description", - faas_name="faas_name", - faas_namespace="faas_namespace", - function_name="function_name", - input_schema={}, - output_schema={}, - ) - assert_matches_type(FunctionCreateResponse, function, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.functions.with_raw_response.create( - 
path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - function = response.parse() - assert_matches_type(FunctionCreateResponse, function, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.functions.with_streaming_response.create( - path_agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - function = response.parse() - assert_matches_type(FunctionCreateResponse, function, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.agents.functions.with_raw_response.create( - path_agent_uuid="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - function = client.agents.functions.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", - ) - assert_matches_type(FunctionUpdateResponse, function, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - function = client.agents.functions.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - description="description", - faas_name="faas_name", - faas_namespace="faas_namespace", - function_name="function_name", - body_function_uuid="function_uuid", - input_schema={}, - output_schema={}, - ) - assert_matches_type(FunctionUpdateResponse, function, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: 
DigitaloceanGenaiSDK) -> None: - response = client.agents.functions.with_raw_response.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - function = response.parse() - assert_matches_type(FunctionUpdateResponse, function, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.functions.with_streaming_response.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - function = response.parse() - assert_matches_type(FunctionUpdateResponse, function, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.agents.functions.with_raw_response.update( - path_function_uuid="function_uuid", - path_agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): - client.agents.functions.with_raw_response.update( - path_function_uuid="", - path_agent_uuid="agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - function = client.agents.functions.delete( - function_uuid="function_uuid", - agent_uuid="agent_uuid", - ) - assert_matches_type(FunctionDeleteResponse, function, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.functions.with_raw_response.delete( - function_uuid="function_uuid", - 
agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - function = response.parse() - assert_matches_type(FunctionDeleteResponse, function, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.functions.with_streaming_response.delete( - function_uuid="function_uuid", - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - function = response.parse() - assert_matches_type(FunctionDeleteResponse, function, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.functions.with_raw_response.delete( - function_uuid="function_uuid", - agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): - client.agents.functions.with_raw_response.delete( - function_uuid="", - agent_uuid="agent_uuid", - ) - - -class TestAsyncFunctions: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - function = await async_client.agents.functions.create( - path_agent_uuid="agent_uuid", - ) - assert_matches_type(FunctionCreateResponse, function, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - function = await async_client.agents.functions.create( - path_agent_uuid="agent_uuid", - 
body_agent_uuid="agent_uuid", - description="description", - faas_name="faas_name", - faas_namespace="faas_namespace", - function_name="function_name", - input_schema={}, - output_schema={}, - ) - assert_matches_type(FunctionCreateResponse, function, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.functions.with_raw_response.create( - path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - function = await response.parse() - assert_matches_type(FunctionCreateResponse, function, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.functions.with_streaming_response.create( - path_agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - function = await response.parse() - assert_matches_type(FunctionCreateResponse, function, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.agents.functions.with_raw_response.create( - path_agent_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - function = await async_client.agents.functions.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", - ) - assert_matches_type(FunctionUpdateResponse, function, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def 
test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - function = await async_client.agents.functions.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - description="description", - faas_name="faas_name", - faas_namespace="faas_namespace", - function_name="function_name", - body_function_uuid="function_uuid", - input_schema={}, - output_schema={}, - ) - assert_matches_type(FunctionUpdateResponse, function, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.functions.with_raw_response.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - function = await response.parse() - assert_matches_type(FunctionUpdateResponse, function, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.functions.with_streaming_response.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - function = await response.parse() - assert_matches_type(FunctionUpdateResponse, function, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.agents.functions.with_raw_response.update( - path_function_uuid="function_uuid", - path_agent_uuid="", - ) - - with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): - await async_client.agents.functions.with_raw_response.update( - path_function_uuid="", - path_agent_uuid="agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - function = await async_client.agents.functions.delete( - function_uuid="function_uuid", - agent_uuid="agent_uuid", - ) - assert_matches_type(FunctionDeleteResponse, function, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.functions.with_raw_response.delete( - function_uuid="function_uuid", - agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - function = await response.parse() - assert_matches_type(FunctionDeleteResponse, function, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.functions.with_streaming_response.delete( - function_uuid="function_uuid", - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - function = await response.parse() - assert_matches_type(FunctionDeleteResponse, function, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.functions.with_raw_response.delete( - function_uuid="function_uuid", - agent_uuid="", - ) - - with pytest.raises(ValueError, 
match=r"Expected a non-empty value for `function_uuid` but received ''"): - await async_client.agents.functions.with_raw_response.delete( - function_uuid="", - agent_uuid="agent_uuid", - ) diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py deleted file mode 100644 index b313b1af..00000000 --- a/tests/api_resources/agents/test_knowledge_bases.py +++ /dev/null @@ -1,314 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestKnowledgeBases: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_attach(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.agents.knowledge_bases.attach( - "agent_uuid", - ) - assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_attach(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.knowledge_bases.with_raw_response.attach( - "agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = response.parse() - assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_attach(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.knowledge_bases.with_streaming_response.attach( - "agent_uuid", - ) as 
response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = response.parse() - assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_attach(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.knowledge_bases.with_raw_response.attach( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_attach_single(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.agents.knowledge_bases.attach_single( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", - ) - assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_attach_single(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.knowledge_bases.with_raw_response.attach_single( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = response.parse() - assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_attach_single(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.knowledge_bases.with_streaming_response.attach_single( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = response.parse() - assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - 
@pytest.mark.skip() - @parametrize - def test_path_params_attach_single(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.knowledge_bases.with_raw_response.attach_single( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - client.agents.knowledge_bases.with_raw_response.attach_single( - knowledge_base_uuid="", - agent_uuid="agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - def test_method_detach(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.agents.knowledge_bases.detach( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", - ) - assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_detach(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.knowledge_bases.with_raw_response.detach( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = response.parse() - assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_detach(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.knowledge_bases.with_streaming_response.detach( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = response.parse() - assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - 
@parametrize - def test_path_params_detach(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.knowledge_bases.with_raw_response.detach( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - client.agents.knowledge_bases.with_raw_response.detach( - knowledge_base_uuid="", - agent_uuid="agent_uuid", - ) - - -class TestAsyncKnowledgeBases: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_attach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.agents.knowledge_bases.attach( - "agent_uuid", - ) - assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_attach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.knowledge_bases.with_raw_response.attach( - "agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = await response.parse() - assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_attach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.knowledge_bases.with_streaming_response.attach( - "agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = await response.parse() - assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is 
True - - @pytest.mark.skip() - @parametrize - async def test_path_params_attach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.knowledge_bases.with_raw_response.attach( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_attach_single(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.agents.knowledge_bases.attach_single( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", - ) - assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_attach_single(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.knowledge_bases.with_raw_response.attach_single( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = await response.parse() - assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_attach_single(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.knowledge_bases.with_streaming_response.attach_single( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = await response.parse() - assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_attach_single(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.knowledge_bases.with_raw_response.attach_single( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - await async_client.agents.knowledge_bases.with_raw_response.attach_single( - knowledge_base_uuid="", - agent_uuid="agent_uuid", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_detach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.agents.knowledge_bases.detach( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", - ) - assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_detach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.knowledge_bases.with_raw_response.detach( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_detach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.knowledge_bases.with_streaming_response.detach( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - 
@parametrize - async def test_path_params_detach(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.knowledge_bases.with_raw_response.detach( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - await async_client.agents.knowledge_bases.with_raw_response.detach( - knowledge_base_uuid="", - agent_uuid="agent_uuid", - ) diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py index 94f02d8c..77fee4c6 100644 --- a/tests/api_resources/agents/test_versions.py +++ b/tests/api_resources/agents/test_versions.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.agents import ( +from gradientai.types.agents import ( VersionListResponse, VersionUpdateResponse, ) @@ -22,7 +22,7 @@ class TestVersions: @pytest.mark.skip() @parametrize - def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: + def test_method_update(self, client: GradientAI) -> None: version = client.agents.versions.update( path_uuid="uuid", ) @@ -30,7 +30,7 @@ def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + def test_method_update_with_all_params(self, client: GradientAI) -> None: version = client.agents.versions.update( path_uuid="uuid", body_uuid="uuid", @@ -40,7 +40,7 @@ def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> No @pytest.mark.skip() @parametrize - def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: + 
def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.versions.with_raw_response.update( path_uuid="uuid", ) @@ -52,7 +52,7 @@ def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: + def test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.versions.with_streaming_response.update( path_uuid="uuid", ) as response: @@ -66,7 +66,7 @@ def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: + def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): client.agents.versions.with_raw_response.update( path_uuid="", @@ -74,7 +74,7 @@ def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + def test_method_list(self, client: GradientAI) -> None: version = client.agents.versions.list( uuid="uuid", ) @@ -82,7 +82,7 @@ def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + def test_method_list_with_all_params(self, client: GradientAI) -> None: version = client.agents.versions.list( uuid="uuid", page=0, @@ -92,7 +92,7 @@ def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + def test_raw_response_list(self, client: GradientAI) -> None: response = client.agents.versions.with_raw_response.list( uuid="uuid", ) @@ -104,7 +104,7 @@ def test_raw_response_list(self, client: 
DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + def test_streaming_response_list(self, client: GradientAI) -> None: with client.agents.versions.with_streaming_response.list( uuid="uuid", ) as response: @@ -118,7 +118,7 @@ def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: + def test_path_params_list(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): client.agents.versions.with_raw_response.list( uuid="", @@ -130,7 +130,7 @@ class TestAsyncVersions: @pytest.mark.skip() @parametrize - async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_method_update(self, async_client: AsyncGradientAI) -> None: version = await async_client.agents.versions.update( path_uuid="uuid", ) @@ -138,7 +138,7 @@ async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: version = await async_client.agents.versions.update( path_uuid="uuid", body_uuid="uuid", @@ -148,7 +148,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncDigitaloce @pytest.mark.skip() @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.versions.with_raw_response.update( path_uuid="uuid", ) @@ -160,7 +160,7 @@ async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize - async 
def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.versions.with_streaming_response.update( path_uuid="uuid", ) as response: @@ -174,7 +174,7 @@ async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGe @pytest.mark.skip() @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): await async_client.agents.versions.with_raw_response.update( path_uuid="", @@ -182,7 +182,7 @@ async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_method_list(self, async_client: AsyncGradientAI) -> None: version = await async_client.agents.versions.list( uuid="uuid", ) @@ -190,7 +190,7 @@ async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> Non @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: version = await async_client.agents.versions.list( uuid="uuid", page=0, @@ -200,7 +200,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncDigitalocean @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.versions.with_raw_response.list( uuid="uuid", ) @@ -212,7 +212,7 @@ async def test_raw_response_list(self, async_client: 
AsyncDigitaloceanGenaiSDK) @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.versions.with_streaming_response.list( uuid="uuid", ) as response: @@ -226,7 +226,7 @@ async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGena @pytest.mark.skip() @parametrize - async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): await async_client.agents.versions.with_raw_response.list( uuid="", diff --git a/tests/api_resources/api_keys/test_api_keys_.py b/tests/api_resources/api_keys/test_api_keys_.py deleted file mode 100644 index 0ae74d6b..00000000 --- a/tests/api_resources/api_keys/test_api_keys_.py +++ /dev/null @@ -1,446 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.api_keys import ( - APIKeyListResponse, - APIKeyCreateResponse, - APIKeyDeleteResponse, - APIKeyUpdateResponse, - APIKeyUpdateRegenerateResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestAPIKeys: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.api_keys.create() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.api_keys.create( - name="name", - ) - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.api_keys.api_keys.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.api_keys.api_keys.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - 
def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.api_keys.update( - path_api_key_uuid="api_key_uuid", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.api_keys.update( - path_api_key_uuid="api_key_uuid", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.api_keys.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.api_keys.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.api_keys.api_keys.with_raw_response.update( - path_api_key_uuid="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.api_keys.list() - assert_matches_type(APIKeyListResponse, api_key, 
path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.api_keys.list( - page=0, - per_page=0, - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.api_keys.api_keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.api_keys.api_keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.api_keys.delete( - "api_key_uuid", - ) - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.api_keys.api_keys.with_raw_response.delete( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.api_keys.api_keys.with_streaming_response.delete( - "api_key_uuid", - ) as 
response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.api_keys.api_keys.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.api_keys.update_regenerate( - "api_key_uuid", - ) - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - response = client.api_keys.api_keys.with_raw_response.update_regenerate( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - with client.api_keys.api_keys.with_streaming_response.update_regenerate( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update_regenerate(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for 
`api_key_uuid` but received ''"): - client.api_keys.api_keys.with_raw_response.update_regenerate( - "", - ) - - -class TestAsyncAPIKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.api_keys.api_keys.create() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.api_keys.api_keys.create( - name="name", - ) - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.api_keys.api_keys.update( - path_api_key_uuid="api_key_uuid", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, 
path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.api_keys.api_keys.update( - path_api_key_uuid="api_key_uuid", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.api_keys.api_keys.with_raw_response.update( - path_api_key_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.api_keys.api_keys.list() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - 
@parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.api_keys.api_keys.list( - page=0, - per_page=0, - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.api_keys.api_keys.delete( - "api_key_uuid", - ) - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.delete( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, 
async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.delete( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.api_keys.api_keys.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.api_keys.api_keys.update_regenerate( - "api_key_uuid", - ) - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.update_regenerate( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.update_regenerate( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyUpdateRegenerateResponse, 
api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update_regenerate(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.api_keys.api_keys.with_raw_response.update_regenerate( - "", - ) diff --git a/tests/api_resources/auth/agents/test_token.py b/tests/api_resources/auth/agents/test_token.py deleted file mode 100644 index 1e505ccd..00000000 --- a/tests/api_resources/auth/agents/test_token.py +++ /dev/null @@ -1,124 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.auth.agents import TokenCreateResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestToken: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - token = client.auth.agents.token.create( - path_agent_uuid="agent_uuid", - ) - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - token = client.auth.agents.token.create( - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - ) - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.auth.agents.token.with_raw_response.create( - 
path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - token = response.parse() - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.auth.agents.token.with_streaming_response.create( - path_agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - token = response.parse() - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.auth.agents.token.with_raw_response.create( - path_agent_uuid="", - ) - - -class TestAsyncToken: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - token = await async_client.auth.agents.token.create( - path_agent_uuid="agent_uuid", - ) - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - token = await async_client.auth.agents.token.create( - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - ) - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.auth.agents.token.with_raw_response.create( 
- path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - token = await response.parse() - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.auth.agents.token.with_streaming_response.create( - path_agent_uuid="agent_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - token = await response.parse() - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.auth.agents.token.with_raw_response.create( - path_agent_uuid="", - ) diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py deleted file mode 100644 index 68fd67e5..00000000 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ /dev/null @@ -1,374 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.knowledge_bases import ( - DataSourceListResponse, - DataSourceCreateResponse, - DataSourceDeleteResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestDataSources: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - data_source = client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid="knowledge_base_uuid", - ) - assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - data_source = client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid="knowledge_base_uuid", - aws_data_source={ - "bucket_name": "bucket_name", - "item_path": "item_path", - "key_id": "key_id", - "region": "region", - "secret_key": "secret_key", - }, - body_knowledge_base_uuid="knowledge_base_uuid", - spaces_data_source={ - "bucket_name": "bucket_name", - "item_path": "item_path", - "region": "region", - }, - web_crawler_data_source={ - "base_url": "base_url", - "crawling_option": "UNKNOWN", - "embed_media": True, - }, - ) - assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.knowledge_bases.data_sources.with_raw_response.create( - path_knowledge_base_uuid="knowledge_base_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - 
data_source = response.parse() - assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.knowledge_bases.data_sources.with_streaming_response.create( - path_knowledge_base_uuid="knowledge_base_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - data_source = response.parse() - assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_create(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises( - ValueError, match=r"Expected a non-empty value for `path_knowledge_base_uuid` but received ''" - ): - client.knowledge_bases.data_sources.with_raw_response.create( - path_knowledge_base_uuid="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - data_source = client.knowledge_bases.data_sources.list( - knowledge_base_uuid="knowledge_base_uuid", - ) - assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - data_source = client.knowledge_bases.data_sources.list( - knowledge_base_uuid="knowledge_base_uuid", - page=0, - per_page=0, - ) - assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.knowledge_bases.data_sources.with_raw_response.list( - knowledge_base_uuid="knowledge_base_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - data_source = response.parse() - 
assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.knowledge_bases.data_sources.with_streaming_response.list( - knowledge_base_uuid="knowledge_base_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - data_source = response.parse() - assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_list(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - client.knowledge_bases.data_sources.with_raw_response.list( - knowledge_base_uuid="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - data_source = client.knowledge_bases.data_sources.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", - ) - assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - data_source = response.parse() - assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.knowledge_bases.data_sources.with_streaming_response.delete( - data_source_uuid="data_source_uuid", - 
knowledge_base_uuid="knowledge_base_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - data_source = response.parse() - assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"): - client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid="", - knowledge_base_uuid="knowledge_base_uuid", - ) - - -class TestAsyncDataSources: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - data_source = await async_client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid="knowledge_base_uuid", - ) - assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - data_source = await async_client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid="knowledge_base_uuid", - aws_data_source={ - "bucket_name": "bucket_name", - "item_path": "item_path", - "key_id": "key_id", - "region": "region", - "secret_key": "secret_key", - }, - body_knowledge_base_uuid="knowledge_base_uuid", - spaces_data_source={ - "bucket_name": "bucket_name", - "item_path": "item_path", - "region": "region", - }, - 
web_crawler_data_source={ - "base_url": "base_url", - "crawling_option": "UNKNOWN", - "embed_media": True, - }, - ) - assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.knowledge_bases.data_sources.with_raw_response.create( - path_knowledge_base_uuid="knowledge_base_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - data_source = await response.parse() - assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.knowledge_bases.data_sources.with_streaming_response.create( - path_knowledge_base_uuid="knowledge_base_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - data_source = await response.parse() - assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises( - ValueError, match=r"Expected a non-empty value for `path_knowledge_base_uuid` but received ''" - ): - await async_client.knowledge_bases.data_sources.with_raw_response.create( - path_knowledge_base_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - data_source = await async_client.knowledge_bases.data_sources.list( - knowledge_base_uuid="knowledge_base_uuid", - ) - assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - 
async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - data_source = await async_client.knowledge_bases.data_sources.list( - knowledge_base_uuid="knowledge_base_uuid", - page=0, - per_page=0, - ) - assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.knowledge_bases.data_sources.with_raw_response.list( - knowledge_base_uuid="knowledge_base_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - data_source = await response.parse() - assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.knowledge_bases.data_sources.with_streaming_response.list( - knowledge_base_uuid="knowledge_base_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - data_source = await response.parse() - assert_matches_type(DataSourceListResponse, data_source, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - await async_client.knowledge_bases.data_sources.with_raw_response.list( - knowledge_base_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - data_source = await async_client.knowledge_bases.data_sources.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", 
- ) - assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - data_source = await response.parse() - assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.knowledge_bases.data_sources.with_streaming_response.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - data_source = await response.parse() - assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - await async_client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="", - ) - - with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"): - await async_client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid="", - knowledge_base_uuid="knowledge_base_uuid", - ) diff --git a/tests/api_resources/providers/__init__.py b/tests/api_resources/providers/__init__.py deleted file mode 100644 
index fd8019a9..00000000 --- a/tests/api_resources/providers/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/providers/anthropic/__init__.py b/tests/api_resources/providers/anthropic/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/providers/anthropic/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/providers/anthropic/test_keys.py b/tests/api_resources/providers/anthropic/test_keys.py deleted file mode 100644 index c5491bd4..00000000 --- a/tests/api_resources/providers/anthropic/test_keys.py +++ /dev/null @@ -1,555 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.providers.anthropic import ( - KeyListResponse, - KeyCreateResponse, - KeyDeleteResponse, - KeyUpdateResponse, - KeyRetrieveResponse, - KeyListAgentsResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestKeys: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.anthropic.keys.create() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.anthropic.keys.create( - api_key="api_key", - name="name", - ) - assert_matches_type(KeyCreateResponse, key, 
path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.providers.anthropic.keys.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.providers.anthropic.keys.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.anthropic.keys.retrieve( - "api_key_uuid", - ) - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.providers.anthropic.keys.with_raw_response.retrieve( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.providers.anthropic.keys.with_streaming_response.retrieve( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - assert cast(Any, 
response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.providers.anthropic.keys.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.anthropic.keys.update( - path_api_key_uuid="api_key_uuid", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.anthropic.keys.update( - path_api_key_uuid="api_key_uuid", - api_key="api_key", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.providers.anthropic.keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.providers.anthropic.keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: - with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.providers.anthropic.keys.with_raw_response.update( - path_api_key_uuid="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.anthropic.keys.list() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.anthropic.keys.list( - page=0, - per_page=0, - ) - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.providers.anthropic.keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.providers.anthropic.keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.anthropic.keys.delete( - "api_key_uuid", - ) - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.providers.anthropic.keys.with_raw_response.delete( - "api_key_uuid", - ) - - assert response.is_closed is True - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.providers.anthropic.keys.with_streaming_response.delete( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.providers.anthropic.keys.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list_agents(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.anthropic.keys.list_agents( - uuid="uuid", - ) - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_agents_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.anthropic.keys.list_agents( - uuid="uuid", - page=0, - per_page=0, - ) - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list_agents(self, client: DigitaloceanGenaiSDK) -> None: - response = client.providers.anthropic.keys.with_raw_response.list_agents( - uuid="uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list_agents(self, client: 
DigitaloceanGenaiSDK) -> None: - with client.providers.anthropic.keys.with_streaming_response.list_agents( - uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_list_agents(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.providers.anthropic.keys.with_raw_response.list_agents( - uuid="", - ) - - -class TestAsyncKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.anthropic.keys.create() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.anthropic.keys.create( - api_key="api_key", - name="name", - ) - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with 
async_client.providers.anthropic.keys.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.anthropic.keys.retrieve( - "api_key_uuid", - ) - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.retrieve( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.providers.anthropic.keys.with_streaming_response.retrieve( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.providers.anthropic.keys.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: 
AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.anthropic.keys.update( - path_api_key_uuid="api_key_uuid", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.anthropic.keys.update( - path_api_key_uuid="api_key_uuid", - api_key="api_key", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.providers.anthropic.keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.providers.anthropic.keys.with_raw_response.update( - path_api_key_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, 
async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.anthropic.keys.list() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.anthropic.keys.list( - page=0, - per_page=0, - ) - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.providers.anthropic.keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.anthropic.keys.delete( - "api_key_uuid", - ) - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.delete( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await 
response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.providers.anthropic.keys.with_streaming_response.delete( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.providers.anthropic.keys.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.anthropic.keys.list_agents( - uuid="uuid", - ) - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_agents_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.anthropic.keys.list_agents( - uuid="uuid", - page=0, - per_page=0, - ) - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.providers.anthropic.keys.with_raw_response.list_agents( - uuid="uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() 
- @parametrize - async def test_streaming_response_list_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.providers.anthropic.keys.with_streaming_response.list_agents( - uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyListAgentsResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_list_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.providers.anthropic.keys.with_raw_response.list_agents( - uuid="", - ) diff --git a/tests/api_resources/providers/openai/__init__.py b/tests/api_resources/providers/openai/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/providers/openai/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/providers/openai/test_keys.py b/tests/api_resources/providers/openai/test_keys.py deleted file mode 100644 index b88b6a5f..00000000 --- a/tests/api_resources/providers/openai/test_keys.py +++ /dev/null @@ -1,555 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types.providers.openai import ( - KeyListResponse, - KeyCreateResponse, - KeyDeleteResponse, - KeyUpdateResponse, - KeyRetrieveResponse, - KeyRetrieveAgentsResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestKeys: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.openai.keys.create() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.openai.keys.create( - api_key="api_key", - name="name", - ) - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.providers.openai.keys.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.providers.openai.keys.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize 
- def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.openai.keys.retrieve( - "api_key_uuid", - ) - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.providers.openai.keys.with_raw_response.retrieve( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.providers.openai.keys.with_streaming_response.retrieve( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.providers.openai.keys.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.openai.keys.update( - path_api_key_uuid="api_key_uuid", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.openai.keys.update( - path_api_key_uuid="api_key_uuid", - api_key="api_key", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - 
- @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.providers.openai.keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.providers.openai.keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.providers.openai.keys.with_raw_response.update( - path_api_key_uuid="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.openai.keys.list() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.openai.keys.list( - page=0, - per_page=0, - ) - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.providers.openai.keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = 
response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.providers.openai.keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.openai.keys.delete( - "api_key_uuid", - ) - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.providers.openai.keys.with_raw_response.delete( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.providers.openai.keys.with_streaming_response.delete( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.providers.openai.keys.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - def 
test_method_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.openai.keys.retrieve_agents( - uuid="uuid", - ) - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve_agents_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - key = client.providers.openai.keys.retrieve_agents( - uuid="uuid", - page=0, - per_page=0, - ) - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> None: - response = client.providers.openai.keys.with_raw_response.retrieve_agents( - uuid="uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> None: - with client.providers.openai.keys.with_streaming_response.retrieve_agents( - uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve_agents(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.providers.openai.keys.with_raw_response.retrieve_agents( - uuid="", - ) - - -class TestAsyncKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: 
AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.openai.keys.create() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.openai.keys.create( - api_key="api_key", - name="name", - ) - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.providers.openai.keys.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.providers.openai.keys.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.openai.keys.retrieve( - "api_key_uuid", - ) - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.providers.openai.keys.with_raw_response.retrieve( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - 
key = await response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.providers.openai.keys.with_streaming_response.retrieve( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.providers.openai.keys.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.openai.keys.update( - path_api_key_uuid="api_key_uuid", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.openai.keys.update( - path_api_key_uuid="api_key_uuid", - api_key="api_key", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.providers.openai.keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - 
assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.providers.openai.keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.providers.openai.keys.with_raw_response.update( - path_api_key_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.openai.keys.list() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.openai.keys.list( - page=0, - per_page=0, - ) - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.providers.openai.keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) 
-> None: - async with async_client.providers.openai.keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.openai.keys.delete( - "api_key_uuid", - ) - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.providers.openai.keys.with_raw_response.delete( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.providers.openai.keys.with_streaming_response.delete( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyDeleteResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.providers.openai.keys.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve_agents(self, async_client: 
AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.openai.keys.retrieve_agents( - uuid="uuid", - ) - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - key = await async_client.providers.openai.keys.retrieve_agents( - uuid="uuid", - page=0, - per_page=0, - ) - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.providers.openai.keys.with_raw_response.retrieve_agents( - uuid="uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.providers.openai.keys.with_streaming_response.retrieve_agents( - uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve_agents(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.providers.openai.keys.with_raw_response.retrieve_agents( - uuid="", - ) diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py deleted file mode 100644 index 3aafae23..00000000 --- 
a/tests/api_resources/test_agents.py +++ /dev/null @@ -1,597 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import ( - AgentListResponse, - AgentCreateResponse, - AgentDeleteResponse, - AgentUpdateResponse, - AgentRetrieveResponse, - AgentUpdateStatusResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestAgents: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.agents.create() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.agents.create( - anthropic_key_uuid="anthropic_key_uuid", - description="description", - instruction="instruction", - knowledge_base_uuid=["string"], - model_uuid="model_uuid", - name="name", - openai_key_uuid="open_ai_key_uuid", - project_id="project_id", - region="region", - tags=["string"], - ) - assert_matches_type(AgentCreateResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: 
- with client.agents.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - agent = response.parse() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.agents.retrieve( - "uuid", - ) - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.with_raw_response.retrieve( - "uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.with_streaming_response.retrieve( - "uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - agent = response.parse() - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.agents.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.agents.update( - path_uuid="uuid", - ) - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, 
client: DigitaloceanGenaiSDK) -> None: - agent = client.agents.update( - path_uuid="uuid", - anthropic_key_uuid="anthropic_key_uuid", - description="description", - instruction="instruction", - k=0, - max_tokens=0, - model_uuid="model_uuid", - name="name", - openai_key_uuid="open_ai_key_uuid", - project_id="project_id", - provide_citations=True, - retrieval_method="RETRIEVAL_METHOD_UNKNOWN", - tags=["string"], - temperature=0, - top_p=0, - body_uuid="uuid", - ) - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.with_raw_response.update( - path_uuid="uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.with_streaming_response.update( - path_uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - agent = response.parse() - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.agents.with_raw_response.update( - path_uuid="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.agents.list() - assert_matches_type(AgentListResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) 
-> None: - agent = client.agents.list( - only_deployed=True, - page=0, - per_page=0, - ) - assert_matches_type(AgentListResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentListResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - agent = response.parse() - assert_matches_type(AgentListResponse, agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.agents.delete( - "uuid", - ) - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.with_raw_response.delete( - "uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.with_streaming_response.delete( - "uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - agent = response.parse() - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) - - assert 
cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.agents.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update_status(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.agents.update_status( - path_uuid="uuid", - ) - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_status_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - agent = client.agents.update_status( - path_uuid="uuid", - body_uuid="uuid", - visibility="VISIBILITY_UNKNOWN", - ) - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update_status(self, client: DigitaloceanGenaiSDK) -> None: - response = client.agents.with_raw_response.update_status( - path_uuid="uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update_status(self, client: DigitaloceanGenaiSDK) -> None: - with client.agents.with_streaming_response.update_status( - path_uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - agent = response.parse() - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update_status(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but 
received ''"): - client.agents.with_raw_response.update_status( - path_uuid="", - ) - - -class TestAsyncAgents: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.agents.create() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.agents.create( - anthropic_key_uuid="anthropic_key_uuid", - description="description", - instruction="instruction", - knowledge_base_uuid=["string"], - model_uuid="model_uuid", - name="name", - openai_key_uuid="open_ai_key_uuid", - project_id="project_id", - region="region", - tags=["string"], - ) - assert_matches_type(AgentCreateResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - agent = await response.parse() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: 
AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.agents.retrieve( - "uuid", - ) - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.with_raw_response.retrieve( - "uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.with_streaming_response.retrieve( - "uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - agent = await response.parse() - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.agents.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.agents.update( - path_uuid="uuid", - ) - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.agents.update( - path_uuid="uuid", - anthropic_key_uuid="anthropic_key_uuid", - description="description", - instruction="instruction", - k=0, - max_tokens=0, - 
model_uuid="model_uuid", - name="name", - openai_key_uuid="open_ai_key_uuid", - project_id="project_id", - provide_citations=True, - retrieval_method="RETRIEVAL_METHOD_UNKNOWN", - tags=["string"], - temperature=0, - top_p=0, - body_uuid="uuid", - ) - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.with_raw_response.update( - path_uuid="uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.with_streaming_response.update( - path_uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - agent = await response.parse() - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.agents.with_raw_response.update( - path_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.agents.list() - assert_matches_type(AgentListResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.agents.list( - only_deployed=True, 
- page=0, - per_page=0, - ) - assert_matches_type(AgentListResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentListResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - agent = await response.parse() - assert_matches_type(AgentListResponse, agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.agents.delete( - "uuid", - ) - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.with_raw_response.delete( - "uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.with_streaming_response.delete( - "uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - agent 
= await response.parse() - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.agents.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update_status(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.agents.update_status( - path_uuid="uuid", - ) - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_status_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - agent = await async_client.agents.update_status( - path_uuid="uuid", - body_uuid="uuid", - visibility="VISIBILITY_UNKNOWN", - ) - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update_status(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.agents.with_raw_response.update_status( - path_uuid="uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update_status(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.agents.with_streaming_response.update_status( - path_uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - agent = await response.parse() - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) 
- - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update_status(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.agents.with_raw_response.update_status( - path_uuid="", - ) diff --git a/tests/api_resources/test_api_keys.py b/tests/api_resources/test_api_keys.py deleted file mode 100644 index 198eb261..00000000 --- a/tests/api_resources/test_api_keys.py +++ /dev/null @@ -1,100 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import APIKeyListResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestAPIKeys: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.list() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - api_key = client.api_keys.list( - page=0, - per_page=0, - public_only=True, - usecases=["MODEL_USECASE_UNKNOWN"], - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.api_keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() 
- assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.api_keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncAPIKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.api_keys.list() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - api_key = await async_client.api_keys.list( - page=0, - per_page=0, - public_only=True, - usecases=["MODEL_USECASE_UNKNOWN"], - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.api_keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.api_keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await 
response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_chat.py b/tests/api_resources/test_chat.py index 0bf48414..2c5bcbd8 100644 --- a/tests/api_resources/test_chat.py +++ b/tests/api_resources/test_chat.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import ChatCreateCompletionResponse +from gradientai.types import ChatCreateCompletionResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -19,7 +19,7 @@ class TestChat: @pytest.mark.skip() @parametrize - def test_method_create_completion(self, client: DigitaloceanGenaiSDK) -> None: + def test_method_create_completion(self, client: GradientAI) -> None: chat = client.chat.create_completion( messages=[ { @@ -33,7 +33,7 @@ def test_method_create_completion(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_method_create_completion_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + def test_method_create_completion_with_all_params(self, client: GradientAI) -> None: chat = client.chat.create_completion( messages=[ { @@ -62,7 +62,7 @@ def test_method_create_completion_with_all_params(self, client: DigitaloceanGena @pytest.mark.skip() @parametrize - def test_raw_response_create_completion(self, client: DigitaloceanGenaiSDK) -> None: + def test_raw_response_create_completion(self, client: GradientAI) -> None: response = client.chat.with_raw_response.create_completion( messages=[ { @@ -80,7 +80,7 @@ def test_raw_response_create_completion(self, client: DigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize - def test_streaming_response_create_completion(self, client: DigitaloceanGenaiSDK) -> None: + def 
test_streaming_response_create_completion(self, client: GradientAI) -> None: with client.chat.with_streaming_response.create_completion( messages=[ { @@ -104,7 +104,7 @@ class TestAsyncChat: @pytest.mark.skip() @parametrize - async def test_method_create_completion(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_method_create_completion(self, async_client: AsyncGradientAI) -> None: chat = await async_client.chat.create_completion( messages=[ { @@ -118,7 +118,7 @@ async def test_method_create_completion(self, async_client: AsyncDigitaloceanGen @pytest.mark.skip() @parametrize - async def test_method_create_completion_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_method_create_completion_with_all_params(self, async_client: AsyncGradientAI) -> None: chat = await async_client.chat.create_completion( messages=[ { @@ -147,7 +147,7 @@ async def test_method_create_completion_with_all_params(self, async_client: Asyn @pytest.mark.skip() @parametrize - async def test_raw_response_create_completion(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_raw_response_create_completion(self, async_client: AsyncGradientAI) -> None: response = await async_client.chat.with_raw_response.create_completion( messages=[ { @@ -165,7 +165,7 @@ async def test_raw_response_create_completion(self, async_client: AsyncDigitaloc @pytest.mark.skip() @parametrize - async def test_streaming_response_create_completion(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_streaming_response_create_completion(self, async_client: AsyncGradientAI) -> None: async with async_client.chat.with_streaming_response.create_completion( messages=[ { diff --git a/tests/api_resources/test_embeddings.py b/tests/api_resources/test_embeddings.py index ea1b5879..e5b394ef 100644 --- a/tests/api_resources/test_embeddings.py +++ b/tests/api_resources/test_embeddings.py @@ -7,9 +7,9 @@ import pytest +from gradientai 
import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import EmbeddingCreateResponse +from gradientai.types import EmbeddingCreateResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -19,7 +19,7 @@ class TestEmbeddings: @pytest.mark.skip() @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: + def test_method_create(self, client: GradientAI) -> None: embedding = client.embeddings.create( input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", @@ -28,7 +28,7 @@ def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: + def test_method_create_with_all_params(self, client: GradientAI) -> None: embedding = client.embeddings.create( input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", @@ -38,7 +38,7 @@ def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> No @pytest.mark.skip() @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: + def test_raw_response_create(self, client: GradientAI) -> None: response = client.embeddings.with_raw_response.create( input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", @@ -51,7 +51,7 @@ def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: + def test_streaming_response_create(self, client: GradientAI) -> None: with client.embeddings.with_streaming_response.create( input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", @@ -70,7 +70,7 @@ class TestAsyncEmbeddings: @pytest.mark.skip() @parametrize 
- async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_method_create(self, async_client: AsyncGradientAI) -> None: embedding = await async_client.embeddings.create( input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", @@ -79,7 +79,7 @@ async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> N @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: embedding = await async_client.embeddings.create( input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", @@ -89,7 +89,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncDigitaloce @pytest.mark.skip() @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: response = await async_client.embeddings.with_raw_response.create( input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", @@ -102,7 +102,7 @@ async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK @pytest.mark.skip() @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: async with async_client.embeddings.with_streaming_response.create( input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", diff --git a/tests/api_resources/test_indexing_jobs.py b/tests/api_resources/test_indexing_jobs.py deleted file mode 100644 index 9ae7ec50..00000000 --- a/tests/api_resources/test_indexing_jobs.py +++ /dev/null @@ -1,446 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import ( - IndexingJobListResponse, - IndexingJobCreateResponse, - IndexingJobRetrieveResponse, - IndexingJobUpdateCancelResponse, - IndexingJobRetrieveDataSourcesResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestIndexingJobs: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - indexing_job = client.indexing_jobs.create() - assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - indexing_job = client.indexing_jobs.create( - data_source_uuids=["string"], - knowledge_base_uuid="knowledge_base_uuid", - ) - assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = client.indexing_jobs.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - indexing_job = response.parse() - assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.indexing_jobs.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - indexing_job = response.parse() - 
assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - indexing_job = client.indexing_jobs.retrieve( - "uuid", - ) - assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.indexing_jobs.with_raw_response.retrieve( - "uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - indexing_job = response.parse() - assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.indexing_jobs.with_streaming_response.retrieve( - "uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - indexing_job = response.parse() - assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.indexing_jobs.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - indexing_job = client.indexing_jobs.list() - assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - indexing_job = client.indexing_jobs.list( - page=0, - per_page=0, - ) 
- assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.indexing_jobs.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - indexing_job = response.parse() - assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.indexing_jobs.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - indexing_job = response.parse() - assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) -> None: - indexing_job = client.indexing_jobs.retrieve_data_sources( - "indexing_job_uuid", - ) - assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) -> None: - response = client.indexing_jobs.with_raw_response.retrieve_data_sources( - "indexing_job_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - indexing_job = response.parse() - assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) -> None: - with client.indexing_jobs.with_streaming_response.retrieve_data_sources( - "indexing_job_uuid", - ) as response: - assert not 
response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - indexing_job = response.parse() - assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve_data_sources(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): - client.indexing_jobs.with_raw_response.retrieve_data_sources( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update_cancel(self, client: DigitaloceanGenaiSDK) -> None: - indexing_job = client.indexing_jobs.update_cancel( - path_uuid="uuid", - ) - assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_cancel_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - indexing_job = client.indexing_jobs.update_cancel( - path_uuid="uuid", - body_uuid="uuid", - ) - assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update_cancel(self, client: DigitaloceanGenaiSDK) -> None: - response = client.indexing_jobs.with_raw_response.update_cancel( - path_uuid="uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - indexing_job = response.parse() - assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update_cancel(self, client: DigitaloceanGenaiSDK) -> None: - with client.indexing_jobs.with_streaming_response.update_cancel( - path_uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - indexing_job = 
response.parse() - assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update_cancel(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.indexing_jobs.with_raw_response.update_cancel( - path_uuid="", - ) - - -class TestAsyncIndexingJobs: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - indexing_job = await async_client.indexing_jobs.create() - assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - indexing_job = await async_client.indexing_jobs.create( - data_source_uuids=["string"], - knowledge_base_uuid="knowledge_base_uuid", - ) - assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.indexing_jobs.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - indexing_job = await response.parse() - assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.indexing_jobs.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" 
- - indexing_job = await response.parse() - assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - indexing_job = await async_client.indexing_jobs.retrieve( - "uuid", - ) - assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.indexing_jobs.with_raw_response.retrieve( - "uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - indexing_job = await response.parse() - assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.indexing_jobs.with_streaming_response.retrieve( - "uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - indexing_job = await response.parse() - assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.indexing_jobs.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - indexing_job = await async_client.indexing_jobs.list() - assert_matches_type(IndexingJobListResponse, indexing_job, 
path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - indexing_job = await async_client.indexing_jobs.list( - page=0, - per_page=0, - ) - assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.indexing_jobs.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - indexing_job = await response.parse() - assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.indexing_jobs.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - indexing_job = await response.parse() - assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve_data_sources(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - indexing_job = await async_client.indexing_jobs.retrieve_data_sources( - "indexing_job_uuid", - ) - assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve_data_sources(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.indexing_jobs.with_raw_response.retrieve_data_sources( - "indexing_job_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - indexing_job = await 
response.parse() - assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve_data_sources(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.indexing_jobs.with_streaming_response.retrieve_data_sources( - "indexing_job_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - indexing_job = await response.parse() - assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve_data_sources(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): - await async_client.indexing_jobs.with_raw_response.retrieve_data_sources( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - indexing_job = await async_client.indexing_jobs.update_cancel( - path_uuid="uuid", - ) - assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_cancel_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - indexing_job = await async_client.indexing_jobs.update_cancel( - path_uuid="uuid", - body_uuid="uuid", - ) - assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.indexing_jobs.with_raw_response.update_cancel( - path_uuid="uuid", - ) - - assert response.is_closed is True - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - indexing_job = await response.parse() - assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.indexing_jobs.with_streaming_response.update_cancel( - path_uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - indexing_job = await response.parse() - assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update_cancel(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.indexing_jobs.with_raw_response.update_cancel( - path_uuid="", - ) diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py deleted file mode 100644 index 34e3d753..00000000 --- a/tests/api_resources/test_knowledge_bases.py +++ /dev/null @@ -1,510 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import ( - KnowledgeBaseListResponse, - KnowledgeBaseCreateResponse, - KnowledgeBaseDeleteResponse, - KnowledgeBaseUpdateResponse, - KnowledgeBaseRetrieveResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestKnowledgeBases: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.knowledge_bases.create() - assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.knowledge_bases.create( - database_id="database_id", - datasources=[ - { - "bucket_name": "bucket_name", - "bucket_region": "bucket_region", - "file_upload_data_source": { - "original_file_name": "original_file_name", - "size_in_bytes": "size_in_bytes", - "stored_object_key": "stored_object_key", - }, - "item_path": "item_path", - "spaces_data_source": { - "bucket_name": "bucket_name", - "item_path": "item_path", - "region": "region", - }, - "web_crawler_data_source": { - "base_url": "base_url", - "crawling_option": "UNKNOWN", - "embed_media": True, - }, - } - ], - embedding_model_uuid="embedding_model_uuid", - name="name", - project_id="project_id", - region="region", - tags=["string"], - vpc_uuid="vpc_uuid", - ) - assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: DigitaloceanGenaiSDK) -> None: - response = 
client.knowledge_bases.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = response.parse() - assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: DigitaloceanGenaiSDK) -> None: - with client.knowledge_bases.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = response.parse() - assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.knowledge_bases.retrieve( - "uuid", - ) - assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - response = client.knowledge_bases.with_raw_response.retrieve( - "uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = response.parse() - assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with client.knowledge_bases.with_streaming_response.retrieve( - "uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = response.parse() - assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def 
test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.knowledge_bases.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.knowledge_bases.update( - path_uuid="uuid", - ) - assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.knowledge_bases.update( - path_uuid="uuid", - database_id="database_id", - embedding_model_uuid="embedding_model_uuid", - name="name", - project_id="project_id", - tags=["string"], - body_uuid="uuid", - ) - assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: DigitaloceanGenaiSDK) -> None: - response = client.knowledge_bases.with_raw_response.update( - path_uuid="uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = response.parse() - assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: DigitaloceanGenaiSDK) -> None: - with client.knowledge_bases.with_streaming_response.update( - path_uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = response.parse() - assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: DigitaloceanGenaiSDK) -> None: - with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.knowledge_bases.with_raw_response.update( - path_uuid="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.knowledge_bases.list() - assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.knowledge_bases.list( - page=0, - per_page=0, - ) - assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.knowledge_bases.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = response.parse() - assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.knowledge_bases.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = response.parse() - assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: DigitaloceanGenaiSDK) -> None: - knowledge_base = client.knowledge_bases.delete( - "uuid", - ) - assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - response = 
client.knowledge_bases.with_raw_response.delete( - "uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = response.parse() - assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: DigitaloceanGenaiSDK) -> None: - with client.knowledge_bases.with_streaming_response.delete( - "uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = response.parse() - assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: DigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.knowledge_bases.with_raw_response.delete( - "", - ) - - -class TestAsyncKnowledgeBases: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.knowledge_bases.create() - assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.knowledge_bases.create( - database_id="database_id", - datasources=[ - { - "bucket_name": "bucket_name", - "bucket_region": "bucket_region", - "file_upload_data_source": { - "original_file_name": "original_file_name", - "size_in_bytes": "size_in_bytes", - "stored_object_key": "stored_object_key", - }, - "item_path": "item_path", - 
"spaces_data_source": { - "bucket_name": "bucket_name", - "item_path": "item_path", - "region": "region", - }, - "web_crawler_data_source": { - "base_url": "base_url", - "crawling_option": "UNKNOWN", - "embed_media": True, - }, - } - ], - embedding_model_uuid="embedding_model_uuid", - name="name", - project_id="project_id", - region="region", - tags=["string"], - vpc_uuid="vpc_uuid", - ) - assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.knowledge_bases.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.knowledge_bases.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.knowledge_bases.retrieve( - "uuid", - ) - assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.knowledge_bases.with_raw_response.retrieve( - "uuid", - ) - - assert response.is_closed is True - 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.knowledge_bases.with_streaming_response.retrieve( - "uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.knowledge_bases.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.knowledge_bases.update( - path_uuid="uuid", - ) - assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.knowledge_bases.update( - path_uuid="uuid", - database_id="database_id", - embedding_model_uuid="embedding_model_uuid", - name="name", - project_id="project_id", - tags=["string"], - body_uuid="uuid", - ) - assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await 
async_client.knowledge_bases.with_raw_response.update( - path_uuid="uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.knowledge_bases.with_streaming_response.update( - path_uuid="uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.knowledge_bases.with_raw_response.update( - path_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.knowledge_bases.list() - assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.knowledge_bases.list( - page=0, - per_page=0, - ) - assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.knowledge_bases.with_raw_response.list() - - assert response.is_closed is True - 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.knowledge_bases.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - knowledge_base = await async_client.knowledge_bases.delete( - "uuid", - ) - assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.knowledge_bases.with_raw_response.delete( - "uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.knowledge_bases.with_streaming_response.delete( - "uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - knowledge_base = await response.parse() - assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) - - assert cast(Any, response.is_closed) is True - - 
@pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.knowledge_bases.with_raw_response.delete( - "", - ) diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index 1148affb..b9559c8e 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import Model, ModelListResponse +from gradientai.types import Model, ModelListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -19,7 +19,7 @@ class TestModels: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + def test_method_retrieve(self, client: GradientAI) -> None: model = client.models.retrieve( "llama3-8b-instruct", ) @@ -27,7 +27,7 @@ def test_method_retrieve(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + def test_raw_response_retrieve(self, client: GradientAI) -> None: response = client.models.with_raw_response.retrieve( "llama3-8b-instruct", ) @@ -39,7 +39,7 @@ def test_raw_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + def test_streaming_response_retrieve(self, client: GradientAI) -> None: with client.models.with_streaming_response.retrieve( "llama3-8b-instruct", ) as response: @@ -53,7 +53,7 @@ def test_streaming_response_retrieve(self, client: DigitaloceanGenaiSDK) -> None @pytest.mark.skip() 
@parametrize - def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: + def test_path_params_retrieve(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): client.models.with_raw_response.retrieve( "", @@ -61,13 +61,13 @@ def test_path_params_retrieve(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: + def test_method_list(self, client: GradientAI) -> None: model = client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: + def test_raw_response_list(self, client: GradientAI) -> None: response = client.models.with_raw_response.list() assert response.is_closed is True @@ -77,7 +77,7 @@ def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.skip() @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: + def test_streaming_response_list(self, client: GradientAI) -> None: with client.models.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -93,7 +93,7 @@ class TestAsyncModels: @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: model = await async_client.models.retrieve( "llama3-8b-instruct", ) @@ -101,7 +101,7 @@ async def test_method_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: response = await 
async_client.models.with_raw_response.retrieve( "llama3-8b-instruct", ) @@ -113,7 +113,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncDigitaloceanGenaiS @pytest.mark.skip() @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: async with async_client.models.with_streaming_response.retrieve( "llama3-8b-instruct", ) as response: @@ -127,7 +127,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncDigitalocean @pytest.mark.skip() @parametrize - async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): await async_client.models.with_raw_response.retrieve( "", @@ -135,13 +135,13 @@ async def test_path_params_retrieve(self, async_client: AsyncDigitaloceanGenaiSD @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_method_list(self, async_client: AsyncGradientAI) -> None: model = await async_client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: response = await async_client.models.with_raw_response.list() assert response.is_closed is True @@ -151,7 +151,7 @@ async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) @pytest.mark.skip() @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: async with 
async_client.models.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py deleted file mode 100644 index f36b6c63..00000000 --- a/tests/api_resources/test_regions.py +++ /dev/null @@ -1,96 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from tests.utils import assert_matches_type -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk.types import RegionListResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestRegions: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: DigitaloceanGenaiSDK) -> None: - region = client.regions.list() - assert_matches_type(RegionListResponse, region, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: DigitaloceanGenaiSDK) -> None: - region = client.regions.list( - serves_batch=True, - serves_inference=True, - ) - assert_matches_type(RegionListResponse, region, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: DigitaloceanGenaiSDK) -> None: - response = client.regions.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - region = response.parse() - assert_matches_type(RegionListResponse, region, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: DigitaloceanGenaiSDK) -> None: - with client.regions.with_streaming_response.list() as 
response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - region = response.parse() - assert_matches_type(RegionListResponse, region, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncRegions: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - region = await async_client.regions.list() - assert_matches_type(RegionListResponse, region, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - region = await async_client.regions.list( - serves_batch=True, - serves_inference=True, - ) - assert_matches_type(RegionListResponse, region, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - response = await async_client.regions.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - region = await response.parse() - assert_matches_type(RegionListResponse, region, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: - async with async_client.regions.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - region = await response.parse() - assert_matches_type(RegionListResponse, region, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/conftest.py b/tests/conftest.py index abd9aa51..04c66a33 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -7,14 +7,14 @@ import pytest from 
pytest_asyncio import is_async_test -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK +from gradientai import GradientAI, AsyncGradientAI if TYPE_CHECKING: from _pytest.fixtures import FixtureRequest # pyright: ignore[reportPrivateImportUsage] pytest.register_assert_rewrite("tests.utils") -logging.getLogger("digitalocean_genai_sdk").setLevel(logging.DEBUG) +logging.getLogger("gradientai").setLevel(logging.DEBUG) # automatically add `pytest.mark.asyncio()` to all of our async tests @@ -32,22 +32,20 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None: @pytest.fixture(scope="session") -def client(request: FixtureRequest) -> Iterator[DigitaloceanGenaiSDK]: +def client(request: FixtureRequest) -> Iterator[GradientAI]: strict = getattr(request, "param", True) if not isinstance(strict, bool): raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}") - with DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client: + with GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client: yield client @pytest.fixture(scope="session") -async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncDigitaloceanGenaiSDK]: +async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncGradientAI]: strict = getattr(request, "param", True) if not isinstance(strict, bool): raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}") - async with AsyncDigitaloceanGenaiSDK( - base_url=base_url, api_key=api_key, _strict_response_validation=strict - ) as client: + async with AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client: yield client diff --git a/tests/test_client.py b/tests/test_client.py index d6412ded..59eee2ff 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -21,17 +21,12 @@ from respx import MockRouter from pydantic 
import ValidationError -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK, APIResponseValidationError -from digitalocean_genai_sdk._types import Omit -from digitalocean_genai_sdk._models import BaseModel, FinalRequestOptions -from digitalocean_genai_sdk._constants import RAW_RESPONSE_HEADER -from digitalocean_genai_sdk._exceptions import ( - APIStatusError, - APITimeoutError, - DigitaloceanGenaiSDKError, - APIResponseValidationError, -) -from digitalocean_genai_sdk._base_client import ( +from gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError +from gradientai._types import Omit +from gradientai._models import BaseModel, FinalRequestOptions +from gradientai._constants import RAW_RESPONSE_HEADER +from gradientai._exceptions import APIStatusError, APITimeoutError, GradientAIError, APIResponseValidationError +from gradientai._base_client import ( DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, @@ -54,7 +49,7 @@ def _low_retry_timeout(*_args: Any, **_kwargs: Any) -> float: return 0.1 -def _get_open_connections(client: DigitaloceanGenaiSDK | AsyncDigitaloceanGenaiSDK) -> int: +def _get_open_connections(client: GradientAI | AsyncGradientAI) -> int: transport = client._client._transport assert isinstance(transport, httpx.HTTPTransport) or isinstance(transport, httpx.AsyncHTTPTransport) @@ -62,8 +57,8 @@ def _get_open_connections(client: DigitaloceanGenaiSDK | AsyncDigitaloceanGenaiS return len(pool._requests) -class TestDigitaloceanGenaiSDK: - client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) +class TestGradientAI: + client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) @pytest.mark.respx(base_url=base_url) def test_raw_response(self, respx_mock: MockRouter) -> None: @@ -110,7 +105,7 @@ def test_copy_default_options(self) -> None: assert isinstance(self.client.timeout, httpx.Timeout) def test_copy_default_headers(self) -> None: - 
client = DigitaloceanGenaiSDK( + client = GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} ) assert client.default_headers["X-Foo"] == "bar" @@ -144,7 +139,7 @@ def test_copy_default_headers(self) -> None: client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"}) def test_copy_default_query(self) -> None: - client = DigitaloceanGenaiSDK( + client = GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"} ) assert _get_params(client)["foo"] == "bar" @@ -235,10 +230,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic # to_raw_response_wrapper leaks through the @functools.wraps() decorator. # # removing the decorator fixes the leak for reasons we don't understand. - "digitalocean_genai_sdk/_legacy_response.py", - "digitalocean_genai_sdk/_response.py", + "gradientai/_legacy_response.py", + "gradientai/_response.py", # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. - "digitalocean_genai_sdk/_compat.py", + "gradientai/_compat.py", # Standard library leaks we don't care about. 
"/logging/__init__.py", ] @@ -269,7 +264,7 @@ def test_request_timeout(self) -> None: assert timeout == httpx.Timeout(100.0) def test_client_timeout_option(self) -> None: - client = DigitaloceanGenaiSDK( + client = GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, timeout=httpx.Timeout(0) ) @@ -280,7 +275,7 @@ def test_client_timeout_option(self) -> None: def test_http_client_timeout_option(self) -> None: # custom timeout given to the httpx client should be used with httpx.Client(timeout=None) as http_client: - client = DigitaloceanGenaiSDK( + client = GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) @@ -290,7 +285,7 @@ def test_http_client_timeout_option(self) -> None: # no timeout given to the httpx client should not use the httpx default with httpx.Client() as http_client: - client = DigitaloceanGenaiSDK( + client = GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) @@ -300,7 +295,7 @@ def test_http_client_timeout_option(self) -> None: # explicitly passing the default timeout currently results in it being ignored with httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: - client = DigitaloceanGenaiSDK( + client = GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) @@ -311,7 +306,7 @@ def test_http_client_timeout_option(self) -> None: async def test_invalid_http_client(self) -> None: with pytest.raises(TypeError, match="Invalid `http_client` arg"): async with httpx.AsyncClient() as http_client: - DigitaloceanGenaiSDK( + GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, @@ -319,14 +314,14 @@ async def test_invalid_http_client(self) -> None: ) def test_default_headers_option(self) -> None: - client = DigitaloceanGenaiSDK( + client = GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, 
default_headers={"X-Foo": "bar"} ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "bar" assert request.headers.get("x-stainless-lang") == "python" - client2 = DigitaloceanGenaiSDK( + client2 = GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, @@ -340,17 +335,17 @@ def test_default_headers_option(self) -> None: assert request.headers.get("x-stainless-lang") == "my-overriding-header" def test_validate_headers(self) -> None: - client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("Authorization") == f"Bearer {api_key}" - with pytest.raises(DigitaloceanGenaiSDKError): + with pytest.raises(GradientAIError): with update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}): - client2 = DigitaloceanGenaiSDK(base_url=base_url, api_key=None, _strict_response_validation=True) + client2 = GradientAI(base_url=base_url, api_key=None, _strict_response_validation=True) _ = client2 def test_default_query_option(self) -> None: - client = DigitaloceanGenaiSDK( + client = GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"} ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -464,7 +459,7 @@ def test_request_extra_query(self) -> None: params = dict(request.url.params) assert params == {"foo": "2"} - def test_multipart_repeating_array(self, client: DigitaloceanGenaiSDK) -> None: + def test_multipart_repeating_array(self, client: GradientAI) -> None: request = client._build_request( FinalRequestOptions.construct( method="get", @@ -551,9 +546,7 @@ class Model(BaseModel): assert response.foo == 2 def test_base_url_setter(self) -> None: - client = 
DigitaloceanGenaiSDK( - base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True - ) + client = GradientAI(base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True) assert client.base_url == "https://example.com/from_init/" client.base_url = "https://example.com/from_setter" # type: ignore[assignment] @@ -561,17 +554,17 @@ def test_base_url_setter(self) -> None: assert client.base_url == "https://example.com/from_setter/" def test_base_url_env(self) -> None: - with update_env(DIGITALOCEAN_GENAI_SDK_BASE_URL="http://localhost:5000/from/env"): - client = DigitaloceanGenaiSDK(api_key=api_key, _strict_response_validation=True) + with update_env(GRADIENT_AI_BASE_URL="http://localhost:5000/from/env"): + client = GradientAI(api_key=api_key, _strict_response_validation=True) assert client.base_url == "http://localhost:5000/from/env/" @pytest.mark.parametrize( "client", [ - DigitaloceanGenaiSDK( + GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True ), - DigitaloceanGenaiSDK( + GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, @@ -580,7 +573,7 @@ def test_base_url_env(self) -> None: ], ids=["standard", "custom http client"], ) - def test_base_url_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None: + def test_base_url_trailing_slash(self, client: GradientAI) -> None: request = client._build_request( FinalRequestOptions( method="post", @@ -593,10 +586,10 @@ def test_base_url_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.parametrize( "client", [ - DigitaloceanGenaiSDK( + GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True ), - DigitaloceanGenaiSDK( + GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, @@ -605,7 +598,7 @@ def 
test_base_url_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None: ], ids=["standard", "custom http client"], ) - def test_base_url_no_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None: + def test_base_url_no_trailing_slash(self, client: GradientAI) -> None: request = client._build_request( FinalRequestOptions( method="post", @@ -618,10 +611,10 @@ def test_base_url_no_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None: @pytest.mark.parametrize( "client", [ - DigitaloceanGenaiSDK( + GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True ), - DigitaloceanGenaiSDK( + GradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, @@ -630,7 +623,7 @@ def test_base_url_no_trailing_slash(self, client: DigitaloceanGenaiSDK) -> None: ], ids=["standard", "custom http client"], ) - def test_absolute_request_url(self, client: DigitaloceanGenaiSDK) -> None: + def test_absolute_request_url(self, client: GradientAI) -> None: request = client._build_request( FinalRequestOptions( method="post", @@ -641,7 +634,7 @@ def test_absolute_request_url(self, client: DigitaloceanGenaiSDK) -> None: assert request.url == "https://myapi.com/foo" def test_copied_client_does_not_close_http(self) -> None: - client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) assert not client.is_closed() copied = client.copy() @@ -652,7 +645,7 @@ def test_copied_client_does_not_close_http(self) -> None: assert not client.is_closed() def test_client_context_manager(self) -> None: - client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) with client as c2: assert c2 is client assert not c2.is_closed() @@ -673,7 +666,7 @@ 
class Model(BaseModel): def test_client_max_retries_validation(self) -> None: with pytest.raises(TypeError, match=r"max_retries cannot be None"): - DigitaloceanGenaiSDK( + GradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, max_retries=cast(Any, None) ) @@ -684,12 +677,12 @@ class Model(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) - strict_client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + strict_client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) with pytest.raises(APIResponseValidationError): strict_client.get("/foo", cast_to=Model) - client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=False) + client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=False) response = client.get("/foo", cast_to=Model) assert isinstance(response, str) # type: ignore[unreachable] @@ -717,14 +710,14 @@ class Model(BaseModel): ) @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: - client = DigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) headers = httpx.Headers({"retry-after": retry_after}) options = FinalRequestOptions(method="get", url="/foo", max_retries=3) calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def 
test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) @@ -738,7 +731,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> No assert _get_open_connections(self.client) == 0 - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) @@ -753,12 +746,12 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> Non assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.parametrize("failure_mode", ["status", "exception"]) def test_retries_taken( self, - client: DigitaloceanGenaiSDK, + client: GradientAI, failures_before_success: int, failure_mode: Literal["status", "exception"], respx_mock: MockRouter, @@ -784,10 +777,10 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_omit_retry_count_header( - self, 
client: DigitaloceanGenaiSDK, failures_before_success: int, respx_mock: MockRouter + self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter ) -> None: client = client.with_options(max_retries=4) @@ -809,10 +802,10 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_overwrite_retry_count_header( - self, client: DigitaloceanGenaiSDK, failures_before_success: int, respx_mock: MockRouter + self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter ) -> None: client = client.with_options(max_retries=4) @@ -861,8 +854,8 @@ def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected" -class TestAsyncDigitaloceanGenaiSDK: - client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) +class TestAsyncGradientAI: + client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio @@ -911,7 +904,7 @@ def test_copy_default_options(self) -> None: assert isinstance(self.client.timeout, httpx.Timeout) def test_copy_default_headers(self) -> None: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} ) assert client.default_headers["X-Foo"] == "bar" @@ -945,7 +938,7 @@ def test_copy_default_headers(self) -> None: client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"}) def 
test_copy_default_query(self) -> None: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"} ) assert _get_params(client)["foo"] == "bar" @@ -1036,10 +1029,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic # to_raw_response_wrapper leaks through the @functools.wraps() decorator. # # removing the decorator fixes the leak for reasons we don't understand. - "digitalocean_genai_sdk/_legacy_response.py", - "digitalocean_genai_sdk/_response.py", + "gradientai/_legacy_response.py", + "gradientai/_response.py", # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. - "digitalocean_genai_sdk/_compat.py", + "gradientai/_compat.py", # Standard library leaks we don't care about. "/logging/__init__.py", ] @@ -1070,7 +1063,7 @@ async def test_request_timeout(self) -> None: assert timeout == httpx.Timeout(100.0) async def test_client_timeout_option(self) -> None: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, timeout=httpx.Timeout(0) ) @@ -1081,7 +1074,7 @@ async def test_client_timeout_option(self) -> None: async def test_http_client_timeout_option(self) -> None: # custom timeout given to the httpx client should be used async with httpx.AsyncClient(timeout=None) as http_client: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) @@ -1091,7 +1084,7 @@ async def test_http_client_timeout_option(self) -> None: # no timeout given to the httpx client should not use the httpx default async with httpx.AsyncClient() as http_client: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) @@ -1101,7 +1094,7 @@ 
async def test_http_client_timeout_option(self) -> None: # explicitly passing the default timeout currently results in it being ignored async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) @@ -1112,7 +1105,7 @@ async def test_http_client_timeout_option(self) -> None: def test_invalid_http_client(self) -> None: with pytest.raises(TypeError, match="Invalid `http_client` arg"): with httpx.Client() as http_client: - AsyncDigitaloceanGenaiSDK( + AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, @@ -1120,14 +1113,14 @@ def test_invalid_http_client(self) -> None: ) def test_default_headers_option(self) -> None: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "bar" assert request.headers.get("x-stainless-lang") == "python" - client2 = AsyncDigitaloceanGenaiSDK( + client2 = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, @@ -1141,17 +1134,17 @@ def test_default_headers_option(self) -> None: assert request.headers.get("x-stainless-lang") == "my-overriding-header" def test_validate_headers(self) -> None: - client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("Authorization") == f"Bearer {api_key}" - with pytest.raises(DigitaloceanGenaiSDKError): + with pytest.raises(GradientAIError): with 
update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}): - client2 = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=None, _strict_response_validation=True) + client2 = AsyncGradientAI(base_url=base_url, api_key=None, _strict_response_validation=True) _ = client2 def test_default_query_option(self) -> None: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"} ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) @@ -1265,7 +1258,7 @@ def test_request_extra_query(self) -> None: params = dict(request.url.params) assert params == {"foo": "2"} - def test_multipart_repeating_array(self, async_client: AsyncDigitaloceanGenaiSDK) -> None: + def test_multipart_repeating_array(self, async_client: AsyncGradientAI) -> None: request = async_client._build_request( FinalRequestOptions.construct( method="get", @@ -1352,7 +1345,7 @@ class Model(BaseModel): assert response.foo == 2 def test_base_url_setter(self) -> None: - client = AsyncDigitaloceanGenaiSDK( + client = AsyncGradientAI( base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True ) assert client.base_url == "https://example.com/from_init/" @@ -1362,17 +1355,17 @@ def test_base_url_setter(self) -> None: assert client.base_url == "https://example.com/from_setter/" def test_base_url_env(self) -> None: - with update_env(DIGITALOCEAN_GENAI_SDK_BASE_URL="http://localhost:5000/from/env"): - client = AsyncDigitaloceanGenaiSDK(api_key=api_key, _strict_response_validation=True) + with update_env(GRADIENT_AI_BASE_URL="http://localhost:5000/from/env"): + client = AsyncGradientAI(api_key=api_key, _strict_response_validation=True) assert client.base_url == "http://localhost:5000/from/env/" @pytest.mark.parametrize( "client", [ - AsyncDigitaloceanGenaiSDK( + AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, 
_strict_response_validation=True ), - AsyncDigitaloceanGenaiSDK( + AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, @@ -1381,7 +1374,7 @@ def test_base_url_env(self) -> None: ], ids=["standard", "custom http client"], ) - def test_base_url_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) -> None: + def test_base_url_trailing_slash(self, client: AsyncGradientAI) -> None: request = client._build_request( FinalRequestOptions( method="post", @@ -1394,10 +1387,10 @@ def test_base_url_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) -> Non @pytest.mark.parametrize( "client", [ - AsyncDigitaloceanGenaiSDK( + AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True ), - AsyncDigitaloceanGenaiSDK( + AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, @@ -1406,7 +1399,7 @@ def test_base_url_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) -> Non ], ids=["standard", "custom http client"], ) - def test_base_url_no_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) -> None: + def test_base_url_no_trailing_slash(self, client: AsyncGradientAI) -> None: request = client._build_request( FinalRequestOptions( method="post", @@ -1419,10 +1412,10 @@ def test_base_url_no_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) -> @pytest.mark.parametrize( "client", [ - AsyncDigitaloceanGenaiSDK( + AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True ), - AsyncDigitaloceanGenaiSDK( + AsyncGradientAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, @@ -1431,7 +1424,7 @@ def test_base_url_no_trailing_slash(self, client: AsyncDigitaloceanGenaiSDK) -> ], ids=["standard", "custom http client"], ) - def test_absolute_request_url(self, client: AsyncDigitaloceanGenaiSDK) -> None: 
+ def test_absolute_request_url(self, client: AsyncGradientAI) -> None: request = client._build_request( FinalRequestOptions( method="post", @@ -1442,7 +1435,7 @@ def test_absolute_request_url(self, client: AsyncDigitaloceanGenaiSDK) -> None: assert request.url == "https://myapi.com/foo" async def test_copied_client_does_not_close_http(self) -> None: - client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) assert not client.is_closed() copied = client.copy() @@ -1454,7 +1447,7 @@ async def test_copied_client_does_not_close_http(self) -> None: assert not client.is_closed() async def test_client_context_manager(self) -> None: - client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) async with client as c2: assert c2 is client assert not c2.is_closed() @@ -1476,7 +1469,7 @@ class Model(BaseModel): async def test_client_max_retries_validation(self) -> None: with pytest.raises(TypeError, match=r"max_retries cannot be None"): - AsyncDigitaloceanGenaiSDK( + AsyncGradientAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, max_retries=cast(Any, None) ) @@ -1488,12 +1481,12 @@ class Model(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) - strict_client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + strict_client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) with pytest.raises(APIResponseValidationError): await strict_client.get("/foo", cast_to=Model) - client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=False) + client = AsyncGradientAI(base_url=base_url, api_key=api_key, 
_strict_response_validation=False) response = await client.get("/foo", cast_to=Model) assert isinstance(response, str) # type: ignore[unreachable] @@ -1522,14 +1515,14 @@ class Model(BaseModel): @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) @pytest.mark.asyncio async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: - client = AsyncDigitaloceanGenaiSDK(base_url=base_url, api_key=api_key, _strict_response_validation=True) + client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) headers = httpx.Headers({"retry-after": retry_after}) options = FinalRequestOptions(method="get", url="/foo", max_retries=3) calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) @@ -1543,7 +1536,7 @@ async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) assert _get_open_connections(self.client) == 0 - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) @@ -1558,13 +1551,13 @@ async def 
test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio @pytest.mark.parametrize("failure_mode", ["status", "exception"]) async def test_retries_taken( self, - async_client: AsyncDigitaloceanGenaiSDK, + async_client: AsyncGradientAI, failures_before_success: int, failure_mode: Literal["status", "exception"], respx_mock: MockRouter, @@ -1590,11 +1583,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_omit_retry_count_header( - self, async_client: AsyncDigitaloceanGenaiSDK, failures_before_success: int, respx_mock: MockRouter + self, async_client: AsyncGradientAI, failures_before_success: int, respx_mock: MockRouter ) -> None: client = async_client.with_options(max_retries=4) @@ -1616,11 +1609,11 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("digitalocean_genai_sdk._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) 
@pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_overwrite_retry_count_header( - self, async_client: AsyncDigitaloceanGenaiSDK, failures_before_success: int, respx_mock: MockRouter + self, async_client: AsyncGradientAI, failures_before_success: int, respx_mock: MockRouter ) -> None: client = async_client.with_options(max_retries=4) @@ -1652,8 +1645,8 @@ def test_get_platform(self) -> None: import nest_asyncio import threading - from digitalocean_genai_sdk._utils import asyncify - from digitalocean_genai_sdk._base_client import get_platform + from gradientai._utils import asyncify + from gradientai._base_client import get_platform async def test_main() -> None: result = await asyncify(get_platform)() diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py index 317130ef..9d1579a8 100644 --- a/tests/test_deepcopy.py +++ b/tests/test_deepcopy.py @@ -1,4 +1,4 @@ -from digitalocean_genai_sdk._utils import deepcopy_minimal +from gradientai._utils import deepcopy_minimal def assert_different_identities(obj1: object, obj2: object) -> None: diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py index aad87e09..2905d59c 100644 --- a/tests/test_extract_files.py +++ b/tests/test_extract_files.py @@ -4,8 +4,8 @@ import pytest -from digitalocean_genai_sdk._types import FileTypes -from digitalocean_genai_sdk._utils import extract_files +from gradientai._types import FileTypes +from gradientai._utils import extract_files def test_removes_files_from_input() -> None: diff --git a/tests/test_files.py b/tests/test_files.py index f3a07ce0..4a723313 100644 --- a/tests/test_files.py +++ b/tests/test_files.py @@ -4,7 +4,7 @@ import pytest from dirty_equals import IsDict, IsList, IsBytes, IsTuple -from digitalocean_genai_sdk._files import to_httpx_files, async_to_httpx_files +from gradientai._files import to_httpx_files, async_to_httpx_files readme_path = Path(__file__).parent.parent.joinpath("README.md") diff --git a/tests/test_models.py 
b/tests/test_models.py index 0be34866..28aff1f3 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -7,9 +7,9 @@ import pydantic from pydantic import Field -from digitalocean_genai_sdk._utils import PropertyInfo -from digitalocean_genai_sdk._compat import PYDANTIC_V2, parse_obj, model_dump, model_json -from digitalocean_genai_sdk._models import BaseModel, construct_type +from gradientai._utils import PropertyInfo +from gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json +from gradientai._models import BaseModel, construct_type class BasicModel(BaseModel): diff --git a/tests/test_qs.py b/tests/test_qs.py index 41824698..9080377b 100644 --- a/tests/test_qs.py +++ b/tests/test_qs.py @@ -4,7 +4,7 @@ import pytest -from digitalocean_genai_sdk._qs import Querystring, stringify +from gradientai._qs import Querystring, stringify def test_empty() -> None: diff --git a/tests/test_required_args.py b/tests/test_required_args.py index 379ac794..c4e6b9d8 100644 --- a/tests/test_required_args.py +++ b/tests/test_required_args.py @@ -2,7 +2,7 @@ import pytest -from digitalocean_genai_sdk._utils import required_args +from gradientai._utils import required_args def test_too_many_positional_params() -> None: diff --git a/tests/test_response.py b/tests/test_response.py index 768537aa..1a8f241e 100644 --- a/tests/test_response.py +++ b/tests/test_response.py @@ -6,8 +6,8 @@ import pytest import pydantic -from digitalocean_genai_sdk import BaseModel, DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk._response import ( +from gradientai import BaseModel, GradientAI, AsyncGradientAI +from gradientai._response import ( APIResponse, BaseAPIResponse, AsyncAPIResponse, @@ -15,8 +15,8 @@ AsyncBinaryAPIResponse, extract_response_type, ) -from digitalocean_genai_sdk._streaming import Stream -from digitalocean_genai_sdk._base_client import FinalRequestOptions +from gradientai._streaming import Stream +from gradientai._base_client import 
FinalRequestOptions class ConcreteBaseAPIResponse(APIResponse[bytes]): ... @@ -37,7 +37,7 @@ def test_extract_response_type_direct_classes() -> None: def test_extract_response_type_direct_class_missing_type_arg() -> None: with pytest.raises( RuntimeError, - match="Expected type to have a type argument at index 0 but it did not", + match="Expected type to have a type argument at index 0 but it did not", ): extract_response_type(AsyncAPIResponse) @@ -56,7 +56,7 @@ def test_extract_response_type_binary_response() -> None: class PydanticModel(pydantic.BaseModel): ... -def test_response_parse_mismatched_basemodel(client: DigitaloceanGenaiSDK) -> None: +def test_response_parse_mismatched_basemodel(client: GradientAI) -> None: response = APIResponse( raw=httpx.Response(200, content=b"foo"), client=client, @@ -68,13 +68,13 @@ def test_response_parse_mismatched_basemodel(client: DigitaloceanGenaiSDK) -> No with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from digitalocean_genai_sdk import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`", ): response.parse(to=PydanticModel) @pytest.mark.asyncio -async def test_async_response_parse_mismatched_basemodel(async_client: AsyncDigitaloceanGenaiSDK) -> None: +async def test_async_response_parse_mismatched_basemodel(async_client: AsyncGradientAI) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=b"foo"), client=async_client, @@ -86,12 +86,12 @@ async def test_async_response_parse_mismatched_basemodel(async_client: AsyncDigi with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from digitalocean_genai_sdk import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. 
`from gradientai import BaseModel`", ): await response.parse(to=PydanticModel) -def test_response_parse_custom_stream(client: DigitaloceanGenaiSDK) -> None: +def test_response_parse_custom_stream(client: GradientAI) -> None: response = APIResponse( raw=httpx.Response(200, content=b"foo"), client=client, @@ -106,7 +106,7 @@ def test_response_parse_custom_stream(client: DigitaloceanGenaiSDK) -> None: @pytest.mark.asyncio -async def test_async_response_parse_custom_stream(async_client: AsyncDigitaloceanGenaiSDK) -> None: +async def test_async_response_parse_custom_stream(async_client: AsyncGradientAI) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=b"foo"), client=async_client, @@ -125,7 +125,7 @@ class CustomModel(BaseModel): bar: int -def test_response_parse_custom_model(client: DigitaloceanGenaiSDK) -> None: +def test_response_parse_custom_model(client: GradientAI) -> None: response = APIResponse( raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), client=client, @@ -141,7 +141,7 @@ def test_response_parse_custom_model(client: DigitaloceanGenaiSDK) -> None: @pytest.mark.asyncio -async def test_async_response_parse_custom_model(async_client: AsyncDigitaloceanGenaiSDK) -> None: +async def test_async_response_parse_custom_model(async_client: AsyncGradientAI) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), client=async_client, @@ -156,7 +156,7 @@ async def test_async_response_parse_custom_model(async_client: AsyncDigitalocean assert obj.bar == 2 -def test_response_parse_annotated_type(client: DigitaloceanGenaiSDK) -> None: +def test_response_parse_annotated_type(client: GradientAI) -> None: response = APIResponse( raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), client=client, @@ -173,7 +173,7 @@ def test_response_parse_annotated_type(client: DigitaloceanGenaiSDK) -> None: assert obj.bar == 2 -async def 
test_async_response_parse_annotated_type(async_client: AsyncDigitaloceanGenaiSDK) -> None: +async def test_async_response_parse_annotated_type(async_client: AsyncGradientAI) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), client=async_client, @@ -201,7 +201,7 @@ async def test_async_response_parse_annotated_type(async_client: AsyncDigitaloce ("FalSe", False), ], ) -def test_response_parse_bool(client: DigitaloceanGenaiSDK, content: str, expected: bool) -> None: +def test_response_parse_bool(client: GradientAI, content: str, expected: bool) -> None: response = APIResponse( raw=httpx.Response(200, content=content), client=client, @@ -226,7 +226,7 @@ def test_response_parse_bool(client: DigitaloceanGenaiSDK, content: str, expecte ("FalSe", False), ], ) -async def test_async_response_parse_bool(client: AsyncDigitaloceanGenaiSDK, content: str, expected: bool) -> None: +async def test_async_response_parse_bool(client: AsyncGradientAI, content: str, expected: bool) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=content), client=client, @@ -245,7 +245,7 @@ class OtherModel(BaseModel): @pytest.mark.parametrize("client", [False], indirect=True) # loose validation -def test_response_parse_expect_model_union_non_json_content(client: DigitaloceanGenaiSDK) -> None: +def test_response_parse_expect_model_union_non_json_content(client: GradientAI) -> None: response = APIResponse( raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}), client=client, @@ -262,9 +262,7 @@ def test_response_parse_expect_model_union_non_json_content(client: Digitalocean @pytest.mark.asyncio @pytest.mark.parametrize("async_client", [False], indirect=True) # loose validation -async def test_async_response_parse_expect_model_union_non_json_content( - async_client: AsyncDigitaloceanGenaiSDK, -) -> None: +async def test_async_response_parse_expect_model_union_non_json_content(async_client: 
AsyncGradientAI) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=b"foo", headers={"Content-Type": "application/text"}), client=async_client, diff --git a/tests/test_streaming.py b/tests/test_streaming.py index e707c674..cdb41a77 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -5,13 +5,13 @@ import httpx import pytest -from digitalocean_genai_sdk import DigitaloceanGenaiSDK, AsyncDigitaloceanGenaiSDK -from digitalocean_genai_sdk._streaming import Stream, AsyncStream, ServerSentEvent +from gradientai import GradientAI, AsyncGradientAI +from gradientai._streaming import Stream, AsyncStream, ServerSentEvent @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_basic(sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK) -> None: +async def test_basic(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: def body() -> Iterator[bytes]: yield b"event: completion\n" yield b'data: {"foo":true}\n' @@ -28,9 +28,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_data_missing_event( - sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK -) -> None: +async def test_data_missing_event(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: def body() -> Iterator[bytes]: yield b'data: {"foo":true}\n' yield b"\n" @@ -46,9 +44,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_event_missing_data( - sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK -) -> None: +async def test_event_missing_data(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: def body() -> Iterator[bytes]: yield b"event: ping\n" yield b"\n" @@ -64,9 +60,7 @@ def body() -> Iterator[bytes]: 
@pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_multiple_events( - sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK -) -> None: +async def test_multiple_events(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: def body() -> Iterator[bytes]: yield b"event: ping\n" yield b"\n" @@ -88,9 +82,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_multiple_events_with_data( - sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK -) -> None: +async def test_multiple_events_with_data(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: def body() -> Iterator[bytes]: yield b"event: ping\n" yield b'data: {"foo":true}\n' @@ -115,7 +107,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) async def test_multiple_data_lines_with_empty_line( - sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK + sync: bool, client: GradientAI, async_client: AsyncGradientAI ) -> None: def body() -> Iterator[bytes]: yield b"event: ping\n" @@ -138,9 +130,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_data_json_escaped_double_new_line( - sync: bool, client: DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK -) -> None: +async def test_data_json_escaped_double_new_line(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: def body() -> Iterator[bytes]: yield b"event: ping\n" yield b'data: {"foo": "my long\\n\\ncontent"}' @@ -157,9 +147,7 @@ def body() -> Iterator[bytes]: @pytest.mark.asyncio @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) -async def test_multiple_data_lines( - sync: bool, client: 
DigitaloceanGenaiSDK, async_client: AsyncDigitaloceanGenaiSDK -) -> None: +async def test_multiple_data_lines(sync: bool, client: GradientAI, async_client: AsyncGradientAI) -> None: def body() -> Iterator[bytes]: yield b"event: ping\n" yield b"data: {\n" @@ -179,8 +167,8 @@ def body() -> Iterator[bytes]: @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) async def test_special_new_line_character( sync: bool, - client: DigitaloceanGenaiSDK, - async_client: AsyncDigitaloceanGenaiSDK, + client: GradientAI, + async_client: AsyncGradientAI, ) -> None: def body() -> Iterator[bytes]: yield b'data: {"content":" culpa"}\n' @@ -210,8 +198,8 @@ def body() -> Iterator[bytes]: @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) async def test_multi_byte_character_multiple_chunks( sync: bool, - client: DigitaloceanGenaiSDK, - async_client: AsyncDigitaloceanGenaiSDK, + client: GradientAI, + async_client: AsyncGradientAI, ) -> None: def body() -> Iterator[bytes]: yield b'data: {"content":"' @@ -251,8 +239,8 @@ def make_event_iterator( content: Iterator[bytes], *, sync: bool, - client: DigitaloceanGenaiSDK, - async_client: AsyncDigitaloceanGenaiSDK, + client: GradientAI, + async_client: AsyncGradientAI, ) -> Iterator[ServerSentEvent] | AsyncIterator[ServerSentEvent]: if sync: return Stream(cast_to=object, client=client, response=httpx.Response(200, content=content))._iter_events() diff --git a/tests/test_transform.py b/tests/test_transform.py index 3c29084e..825fe048 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -8,15 +8,15 @@ import pytest -from digitalocean_genai_sdk._types import NOT_GIVEN, Base64FileInput -from digitalocean_genai_sdk._utils import ( +from gradientai._types import NOT_GIVEN, Base64FileInput +from gradientai._utils import ( PropertyInfo, transform as _transform, parse_datetime, async_transform as _async_transform, ) -from digitalocean_genai_sdk._compat import PYDANTIC_V2 -from 
digitalocean_genai_sdk._models import BaseModel +from gradientai._compat import PYDANTIC_V2 +from gradientai._models import BaseModel _T = TypeVar("_T") diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py index 6fe8c808..3856b2c9 100644 --- a/tests/test_utils/test_proxy.py +++ b/tests/test_utils/test_proxy.py @@ -2,7 +2,7 @@ from typing import Any from typing_extensions import override -from digitalocean_genai_sdk._utils import LazyProxy +from gradientai._utils import LazyProxy class RecursiveLazyProxy(LazyProxy[Any]): diff --git a/tests/test_utils/test_typing.py b/tests/test_utils/test_typing.py index 72bf3422..66ad064f 100644 --- a/tests/test_utils/test_typing.py +++ b/tests/test_utils/test_typing.py @@ -2,7 +2,7 @@ from typing import Generic, TypeVar, cast -from digitalocean_genai_sdk._utils import extract_type_var_from_base +from gradientai._utils import extract_type_var_from_base _T = TypeVar("_T") _T2 = TypeVar("_T2") diff --git a/tests/utils.py b/tests/utils.py index e795e2e8..b539ed2c 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -8,8 +8,8 @@ from datetime import date, datetime from typing_extensions import Literal, get_args, get_origin, assert_type -from digitalocean_genai_sdk._types import Omit, NoneType -from digitalocean_genai_sdk._utils import ( +from gradientai._types import Omit, NoneType +from gradientai._utils import ( is_dict, is_list, is_list_type, @@ -18,8 +18,8 @@ is_annotated_type, is_type_alias_type, ) -from digitalocean_genai_sdk._compat import PYDANTIC_V2, field_outer_type, get_model_fields -from digitalocean_genai_sdk._models import BaseModel +from gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields +from gradientai._models import BaseModel BaseModelT = TypeVar("BaseModelT", bound=BaseModel) From 8f5761b1d18fb48ad7488e6f0ad771c077eb7961 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 14 Jun 2025 05:13:58 +0000 
Subject: [PATCH 04/41] feat(api): update via SDK Studio --- .stats.yml | 6 +- api.md | 7 + src/gradientai/resources/agents/agents.py | 272 ++++++++++++++++++ src/gradientai/types/__init__.py | 13 + src/gradientai/types/agent_create_params.py | 39 +++ src/gradientai/types/agent_create_response.py | 16 ++ src/gradientai/types/agent_list_params.py | 18 ++ src/gradientai/types/agent_list_response.py | 198 +++++++++++++ src/gradientai/types/api_agent.py | 263 +++++++++++++++++ .../types/api_agent_api_key_info.py | 22 ++ src/gradientai/types/api_agreement.py | 17 ++ .../types/api_anthropic_api_key_info.py | 22 ++ src/gradientai/types/api_indexing_job.py | 43 +++ src/gradientai/types/api_knowledge_base.py | 37 +++ src/gradientai/types/api_model.py | 57 ++++ src/gradientai/types/api_model_version.py | 15 + .../types/api_openai_api_key_info.py | 25 ++ tests/api_resources/test_agents.py | 188 ++++++++++++ 18 files changed, 1255 insertions(+), 3 deletions(-) create mode 100644 src/gradientai/types/agent_create_params.py create mode 100644 src/gradientai/types/agent_create_response.py create mode 100644 src/gradientai/types/agent_list_params.py create mode 100644 src/gradientai/types/agent_list_response.py create mode 100644 src/gradientai/types/api_agent.py create mode 100644 src/gradientai/types/api_agent_api_key_info.py create mode 100644 src/gradientai/types/api_agreement.py create mode 100644 src/gradientai/types/api_anthropic_api_key_info.py create mode 100644 src/gradientai/types/api_indexing_job.py create mode 100644 src/gradientai/types/api_knowledge_base.py create mode 100644 src/gradientai/types/api_model.py create mode 100644 src/gradientai/types/api_model_version.py create mode 100644 src/gradientai/types/api_openai_api_key_info.py create mode 100644 tests/api_resources/test_agents.py diff --git a/.stats.yml b/.stats.yml index 652e9eac..1a1a584d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 6 -openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-17838dec38ee8475c4bf4695b8dc70fe42a8f4da8ae9ffd415dc895b6628a952.yml -openapi_spec_hash: cfe5453e150989c8a9dbc9d7b4d1f76a +configured_endpoints: 8 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-2b5d132a76e51849a4fdbb2da2818132d1f8208f137acb86ee71e4a5c130154e.yml +openapi_spec_hash: 6c13968b99ef16b717854a096b6ca506 config_hash: 69dc66269416b2e01e8852b5a6788b97 diff --git a/api.md b/api.md index d05dac3c..d39072be 100644 --- a/api.md +++ b/api.md @@ -11,9 +11,16 @@ from gradientai.types import ( APIModel, APIOpenAIAPIKeyInfo, APIRetrievalMethod, + AgentCreateResponse, + AgentListResponse, ) ``` +Methods: + +- client.agents.create(\*\*params) -> AgentCreateResponse +- client.agents.list(\*\*params) -> AgentListResponse + ## Versions Types: diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index 9896d179..b42dc03c 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -2,6 +2,13 @@ from __future__ import annotations +from typing import List + +import httpx + +from ...types import agent_list_params, agent_create_params +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform from .versions import ( VersionsResource, AsyncVersionsResource, @@ -12,6 +19,15 @@ ) from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.agent_list_response import AgentListResponse +from ...types.agent_create_response import AgentCreateResponse __all__ = ["AgentsResource", "AsyncAgentsResource"] @@ 
-40,6 +56,120 @@ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: """ return AgentsResourceWithStreamingResponse(self) + def create( + self, + *, + anthropic_key_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + instruction: str | NotGiven = NOT_GIVEN, + knowledge_base_uuid: List[str] | NotGiven = NOT_GIVEN, + model_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + openai_key_uuid: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + tags: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentCreateResponse: + """To create a new agent, send a POST request to `/v2/gen-ai/agents`. + + The response + body contains a JSON object with the newly created agent object. + + Args: + instruction: Agent instruction. Instructions help your agent to perform its job effectively. + See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + + model_uuid: Identifier for the foundation model. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/agents", + body=maybe_transform( + { + "anthropic_key_uuid": anthropic_key_uuid, + "description": description, + "instruction": instruction, + "knowledge_base_uuid": knowledge_base_uuid, + "model_uuid": model_uuid, + "name": name, + "openai_key_uuid": openai_key_uuid, + "project_id": project_id, + "region": region, + "tags": tags, + }, + agent_create_params.AgentCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentCreateResponse, + ) + + def list( + self, + *, + only_deployed: bool | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentListResponse: + """ + To list all agents, send a GET request to `/v2/gen-ai/agents`. + + Args: + only_deployed: only list agents that are deployed. + + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "only_deployed": only_deployed, + "page": page, + "per_page": per_page, + }, + agent_list_params.AgentListParams, + ), + ), + cast_to=AgentListResponse, + ) + class AsyncAgentsResource(AsyncAPIResource): @cached_property @@ -65,11 +195,132 @@ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: """ return AsyncAgentsResourceWithStreamingResponse(self) + async def create( + self, + *, + anthropic_key_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + instruction: str | NotGiven = NOT_GIVEN, + knowledge_base_uuid: List[str] | NotGiven = NOT_GIVEN, + model_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + openai_key_uuid: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + tags: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentCreateResponse: + """To create a new agent, send a POST request to `/v2/gen-ai/agents`. + + The response + body contains a JSON object with the newly created agent object. + + Args: + instruction: Agent instruction. 
Instructions help your agent to perform its job effectively. + See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + + model_uuid: Identifier for the foundation model. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/agents", + body=await async_maybe_transform( + { + "anthropic_key_uuid": anthropic_key_uuid, + "description": description, + "instruction": instruction, + "knowledge_base_uuid": knowledge_base_uuid, + "model_uuid": model_uuid, + "name": name, + "openai_key_uuid": openai_key_uuid, + "project_id": project_id, + "region": region, + "tags": tags, + }, + agent_create_params.AgentCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentCreateResponse, + ) + + async def list( + self, + *, + only_deployed: bool | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentListResponse: + """ + To list all agents, send a GET request to `/v2/gen-ai/agents`. + + Args: + only_deployed: only list agents that are deployed. + + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "only_deployed": only_deployed, + "page": page, + "per_page": per_page, + }, + agent_list_params.AgentListParams, + ), + ), + cast_to=AgentListResponse, + ) + class AgentsResourceWithRawResponse: def __init__(self, agents: AgentsResource) -> None: self._agents = agents + self.create = to_raw_response_wrapper( + agents.create, + ) + self.list = to_raw_response_wrapper( + agents.list, + ) + @cached_property def versions(self) -> VersionsResourceWithRawResponse: return VersionsResourceWithRawResponse(self._agents.versions) @@ -79,6 +330,13 @@ class AsyncAgentsResourceWithRawResponse: def __init__(self, agents: AsyncAgentsResource) -> None: self._agents = agents + self.create = async_to_raw_response_wrapper( + agents.create, + ) + self.list = async_to_raw_response_wrapper( + agents.list, + ) + @cached_property def versions(self) -> AsyncVersionsResourceWithRawResponse: return AsyncVersionsResourceWithRawResponse(self._agents.versions) @@ -88,6 +346,13 @@ class AgentsResourceWithStreamingResponse: def __init__(self, agents: AgentsResource) -> None: self._agents = agents + self.create = to_streamed_response_wrapper( + agents.create, + ) + self.list = to_streamed_response_wrapper( + agents.list, + ) + @cached_property def versions(self) -> VersionsResourceWithStreamingResponse: return VersionsResourceWithStreamingResponse(self._agents.versions) @@ -97,6 +362,13 @@ class AsyncAgentsResourceWithStreamingResponse: def __init__(self, agents: AsyncAgentsResource) -> None: self._agents = agents + 
self.create = async_to_streamed_response_wrapper( + agents.create, + ) + self.list = async_to_streamed_response_wrapper( + agents.list, + ) + @cached_property def versions(self) -> AsyncVersionsResourceWithStreamingResponse: return AsyncVersionsResourceWithStreamingResponse(self._agents.versions) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index 7b80eca4..25d7b58d 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -3,11 +3,24 @@ from __future__ import annotations from .model import Model as Model +from .api_agent import APIAgent as APIAgent +from .api_model import APIModel as APIModel +from .api_agreement import APIAgreement as APIAgreement +from .api_indexing_job import APIIndexingJob as APIIndexingJob +from .agent_list_params import AgentListParams as AgentListParams +from .api_model_version import APIModelVersion as APIModelVersion +from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase +from .agent_create_params import AgentCreateParams as AgentCreateParams +from .agent_list_response import AgentListResponse as AgentListResponse from .model_list_response import ModelListResponse as ModelListResponse from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod +from .agent_create_response import AgentCreateResponse as AgentCreateResponse +from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo +from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse +from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob from 
.chat_create_completion_params import ChatCreateCompletionParams as ChatCreateCompletionParams from .chat_create_completion_response import ChatCreateCompletionResponse as ChatCreateCompletionResponse diff --git a/src/gradientai/types/agent_create_params.py b/src/gradientai/types/agent_create_params.py new file mode 100644 index 00000000..58b99df7 --- /dev/null +++ b/src/gradientai/types/agent_create_params.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Annotated, TypedDict + +from .._utils import PropertyInfo + +__all__ = ["AgentCreateParams"] + + +class AgentCreateParams(TypedDict, total=False): + anthropic_key_uuid: str + + description: str + + instruction: str + """Agent instruction. + + Instructions help your agent to perform its job effectively. See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + """ + + knowledge_base_uuid: List[str] + + model_uuid: str + """Identifier for the foundation model.""" + + name: str + + openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")] + + project_id: str + + region: str + + tags: List[str] diff --git a/src/gradientai/types/agent_create_response.py b/src/gradientai/types/agent_create_response.py new file mode 100644 index 00000000..48545fe9 --- /dev/null +++ b/src/gradientai/types/agent_create_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["AgentCreateResponse"] + + +class AgentCreateResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_list_params.py b/src/gradientai/types/agent_list_params.py new file mode 100644 index 00000000..e13a10c9 --- /dev/null +++ b/src/gradientai/types/agent_list_params.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["AgentListParams"] + + +class AgentListParams(TypedDict, total=False): + only_deployed: bool + """only list agents that are deployed.""" + + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/gradientai/types/agent_list_response.py b/src/gradientai/types/agent_list_response.py new file mode 100644 index 00000000..4cedbb39 --- /dev/null +++ b/src/gradientai/types/agent_list_response.py @@ -0,0 +1,198 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel +from .api_model import APIModel +from .agents.api_meta import APIMeta +from .agents.api_links import APILinks +from .api_knowledge_base import APIKnowledgeBase +from .api_retrieval_method import APIRetrievalMethod +from .api_deployment_visibility import APIDeploymentVisibility + +__all__ = [ + "AgentListResponse", + "Agent", + "AgentChatbot", + "AgentChatbotIdentifier", + "AgentDeployment", + "AgentTemplate", + "AgentTemplateGuardrail", +] + + +class AgentChatbot(BaseModel): + button_background_color: Optional[str] = None + + logo: Optional[str] = None + + name: Optional[str] = None + + primary_color: Optional[str] = None + + secondary_color: Optional[str] = None + + starting_message: Optional[str] = None + + +class AgentChatbotIdentifier(BaseModel): + agent_chatbot_identifier: Optional[str] = None + + +class AgentDeployment(BaseModel): + created_at: Optional[datetime] = None + + name: Optional[str] = None + + status: Optional[ + Literal[ + "STATUS_UNKNOWN", + "STATUS_WAITING_FOR_DEPLOYMENT", + "STATUS_DEPLOYING", + "STATUS_RUNNING", + "STATUS_FAILED", + "STATUS_WAITING_FOR_UNDEPLOYMENT", + "STATUS_UNDEPLOYING", + "STATUS_UNDEPLOYMENT_FAILED", + "STATUS_DELETED", + ] + ] = None + + updated_at: Optional[datetime] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + visibility: Optional[APIDeploymentVisibility] = None + + +class AgentTemplateGuardrail(BaseModel): + priority: Optional[int] = None + + uuid: Optional[str] = None + + +class AgentTemplate(BaseModel): + created_at: Optional[datetime] = None + + description: Optional[str] = None + + guardrails: Optional[List[AgentTemplateGuardrail]] = None + + instruction: Optional[str] = None + + k: Optional[int] = None + + knowledge_bases: Optional[List[APIKnowledgeBase]] = None + + long_description: Optional[str] = None + + max_tokens: Optional[int] = None + + 
model: Optional[APIModel] = None + + name: Optional[str] = None + + short_description: Optional[str] = None + + summary: Optional[str] = None + + tags: Optional[List[str]] = None + + temperature: Optional[float] = None + + template_type: Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None + + top_p: Optional[float] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None + + +class Agent(BaseModel): + chatbot: Optional[AgentChatbot] = None + + chatbot_identifiers: Optional[List[AgentChatbotIdentifier]] = None + + created_at: Optional[datetime] = None + + deployment: Optional[AgentDeployment] = None + + description: Optional[str] = None + + if_case: Optional[str] = None + + instruction: Optional[str] = None + """Agent instruction. + + Instructions help your agent to perform its job effectively. See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + """ + + k: Optional[int] = None + + max_tokens: Optional[int] = None + """ + Specifies the maximum number of tokens the model can process in a single input + or output, set as a number between 1 and 512. This determines the length of each + response. + """ + + model: Optional[APIModel] = None + + name: Optional[str] = None + + project_id: Optional[str] = None + + provide_citations: Optional[bool] = None + + region: Optional[str] = None + + retrieval_method: Optional[APIRetrievalMethod] = None + + route_created_at: Optional[datetime] = None + + route_created_by: Optional[str] = None + + route_name: Optional[str] = None + + route_uuid: Optional[str] = None + + tags: Optional[List[str]] = None + + temperature: Optional[float] = None + """Controls the model’s creativity, specified as a number between 0 and 1. + + Lower values produce more predictable and conservative responses, while higher + values encourage creativity and variation. 
+ """ + + template: Optional[AgentTemplate] = None + + top_p: Optional[float] = None + """ + Defines the cumulative probability threshold for word selection, specified as a + number between 0 and 1. Higher values allow for more diverse outputs, while + lower values ensure focused and coherent responses. + """ + + updated_at: Optional[datetime] = None + + url: Optional[str] = None + + user_id: Optional[str] = None + + uuid: Optional[str] = None + + +class AgentListResponse(BaseModel): + agents: Optional[List[Agent]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/gradientai/types/api_agent.py b/src/gradientai/types/api_agent.py new file mode 100644 index 00000000..d6e18ca2 --- /dev/null +++ b/src/gradientai/types/api_agent.py @@ -0,0 +1,263 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel +from .api_model import APIModel +from .api_knowledge_base import APIKnowledgeBase +from .api_retrieval_method import APIRetrievalMethod +from .api_agent_api_key_info import APIAgentAPIKeyInfo +from .api_openai_api_key_info import APIOpenAIAPIKeyInfo +from .api_deployment_visibility import APIDeploymentVisibility +from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = [ + "APIAgent", + "APIKey", + "Chatbot", + "ChatbotIdentifier", + "Deployment", + "Function", + "Guardrail", + "Template", + "TemplateGuardrail", +] + + +class APIKey(BaseModel): + api_key: Optional[str] = None + + +class Chatbot(BaseModel): + button_background_color: Optional[str] = None + + logo: Optional[str] = None + + name: Optional[str] = None + + primary_color: Optional[str] = None + + secondary_color: Optional[str] = None + + starting_message: Optional[str] = None + + +class ChatbotIdentifier(BaseModel): + 
agent_chatbot_identifier: Optional[str] = None + + +class Deployment(BaseModel): + created_at: Optional[datetime] = None + + name: Optional[str] = None + + status: Optional[ + Literal[ + "STATUS_UNKNOWN", + "STATUS_WAITING_FOR_DEPLOYMENT", + "STATUS_DEPLOYING", + "STATUS_RUNNING", + "STATUS_FAILED", + "STATUS_WAITING_FOR_UNDEPLOYMENT", + "STATUS_UNDEPLOYING", + "STATUS_UNDEPLOYMENT_FAILED", + "STATUS_DELETED", + ] + ] = None + + updated_at: Optional[datetime] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + visibility: Optional[APIDeploymentVisibility] = None + + +class Function(BaseModel): + api_key: Optional[str] = None + + created_at: Optional[datetime] = None + + created_by: Optional[str] = None + + description: Optional[str] = None + + faas_name: Optional[str] = None + + faas_namespace: Optional[str] = None + + input_schema: Optional[object] = None + + name: Optional[str] = None + + output_schema: Optional[object] = None + + updated_at: Optional[datetime] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + +class Guardrail(BaseModel): + agent_uuid: Optional[str] = None + + created_at: Optional[datetime] = None + + default_response: Optional[str] = None + + description: Optional[str] = None + + guardrail_uuid: Optional[str] = None + + is_attached: Optional[bool] = None + + is_default: Optional[bool] = None + + metadata: Optional[object] = None + + name: Optional[str] = None + + priority: Optional[int] = None + + type: Optional[ + Literal[ + "GUARDRAIL_TYPE_UNKNOWN", + "GUARDRAIL_TYPE_JAILBREAK", + "GUARDRAIL_TYPE_SENSITIVE_DATA", + "GUARDRAIL_TYPE_CONTENT_MODERATION", + ] + ] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None + + +class TemplateGuardrail(BaseModel): + priority: Optional[int] = None + + uuid: Optional[str] = None + + +class Template(BaseModel): + created_at: Optional[datetime] = None + + description: Optional[str] = None + + guardrails: Optional[List[TemplateGuardrail]] = 
None + + instruction: Optional[str] = None + + k: Optional[int] = None + + knowledge_bases: Optional[List[APIKnowledgeBase]] = None + + long_description: Optional[str] = None + + max_tokens: Optional[int] = None + + model: Optional[APIModel] = None + + name: Optional[str] = None + + short_description: Optional[str] = None + + summary: Optional[str] = None + + tags: Optional[List[str]] = None + + temperature: Optional[float] = None + + template_type: Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None + + top_p: Optional[float] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None + + +class APIAgent(BaseModel): + anthropic_api_key: Optional[APIAnthropicAPIKeyInfo] = None + + api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None + + api_keys: Optional[List[APIKey]] = None + + chatbot: Optional[Chatbot] = None + + chatbot_identifiers: Optional[List[ChatbotIdentifier]] = None + + child_agents: Optional[List["APIAgent"]] = None + + created_at: Optional[datetime] = None + + deployment: Optional[Deployment] = None + + description: Optional[str] = None + + functions: Optional[List[Function]] = None + + guardrails: Optional[List[Guardrail]] = None + + if_case: Optional[str] = None + + instruction: Optional[str] = None + """Agent instruction. + + Instructions help your agent to perform its job effectively. See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. 
+ """ + + k: Optional[int] = None + + knowledge_bases: Optional[List[APIKnowledgeBase]] = None + + max_tokens: Optional[int] = None + + model: Optional[APIModel] = None + + name: Optional[str] = None + + openai_api_key: Optional[APIOpenAIAPIKeyInfo] = None + + parent_agents: Optional[List["APIAgent"]] = None + + project_id: Optional[str] = None + + provide_citations: Optional[bool] = None + + region: Optional[str] = None + + retrieval_method: Optional[APIRetrievalMethod] = None + + route_created_at: Optional[datetime] = None + + route_created_by: Optional[str] = None + + route_name: Optional[str] = None + + route_uuid: Optional[str] = None + + tags: Optional[List[str]] = None + + temperature: Optional[float] = None + + template: Optional[Template] = None + + top_p: Optional[float] = None + + updated_at: Optional[datetime] = None + + url: Optional[str] = None + + user_id: Optional[str] = None + + uuid: Optional[str] = None diff --git a/src/gradientai/types/api_agent_api_key_info.py b/src/gradientai/types/api_agent_api_key_info.py new file mode 100644 index 00000000..8dc71564 --- /dev/null +++ b/src/gradientai/types/api_agent_api_key_info.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime + +from .._models import BaseModel + +__all__ = ["APIAgentAPIKeyInfo"] + + +class APIAgentAPIKeyInfo(BaseModel): + created_at: Optional[datetime] = None + + created_by: Optional[str] = None + + deleted_at: Optional[datetime] = None + + name: Optional[str] = None + + secret_key: Optional[str] = None + + uuid: Optional[str] = None diff --git a/src/gradientai/types/api_agreement.py b/src/gradientai/types/api_agreement.py new file mode 100644 index 00000000..c4359f1f --- /dev/null +++ b/src/gradientai/types/api_agreement.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from .._models import BaseModel + +__all__ = ["APIAgreement"] + + +class APIAgreement(BaseModel): + description: Optional[str] = None + + name: Optional[str] = None + + url: Optional[str] = None + + uuid: Optional[str] = None diff --git a/src/gradientai/types/api_anthropic_api_key_info.py b/src/gradientai/types/api_anthropic_api_key_info.py new file mode 100644 index 00000000..e2e04a8a --- /dev/null +++ b/src/gradientai/types/api_anthropic_api_key_info.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime + +from .._models import BaseModel + +__all__ = ["APIAnthropicAPIKeyInfo"] + + +class APIAnthropicAPIKeyInfo(BaseModel): + created_at: Optional[datetime] = None + + created_by: Optional[str] = None + + deleted_at: Optional[datetime] = None + + name: Optional[str] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None diff --git a/src/gradientai/types/api_indexing_job.py b/src/gradientai/types/api_indexing_job.py new file mode 100644 index 00000000..f24aac94 --- /dev/null +++ b/src/gradientai/types/api_indexing_job.py @@ -0,0 +1,43 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["APIIndexingJob"] + + +class APIIndexingJob(BaseModel): + completed_datasources: Optional[int] = None + + created_at: Optional[datetime] = None + + data_source_uuids: Optional[List[str]] = None + + finished_at: Optional[datetime] = None + + knowledge_base_uuid: Optional[str] = None + + phase: Optional[ + Literal[ + "BATCH_JOB_PHASE_UNKNOWN", + "BATCH_JOB_PHASE_PENDING", + "BATCH_JOB_PHASE_RUNNING", + "BATCH_JOB_PHASE_SUCCEEDED", + "BATCH_JOB_PHASE_FAILED", + "BATCH_JOB_PHASE_ERROR", + "BATCH_JOB_PHASE_CANCELLED", + ] + ] = None + + started_at: Optional[datetime] = None + + tokens: Optional[int] = None + + total_datasources: Optional[int] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None diff --git a/src/gradientai/types/api_knowledge_base.py b/src/gradientai/types/api_knowledge_base.py new file mode 100644 index 00000000..5b4b6e2c --- /dev/null +++ b/src/gradientai/types/api_knowledge_base.py @@ -0,0 +1,37 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional +from datetime import datetime + +from .._models import BaseModel +from .api_indexing_job import APIIndexingJob + +__all__ = ["APIKnowledgeBase"] + + +class APIKnowledgeBase(BaseModel): + added_to_agent_at: Optional[datetime] = None + + created_at: Optional[datetime] = None + + database_id: Optional[str] = None + + embedding_model_uuid: Optional[str] = None + + is_public: Optional[bool] = None + + last_indexing_job: Optional[APIIndexingJob] = None + + name: Optional[str] = None + + project_id: Optional[str] = None + + region: Optional[str] = None + + tags: Optional[List[str]] = None + + updated_at: Optional[datetime] = None + + user_id: Optional[str] = None + + uuid: Optional[str] = None diff --git a/src/gradientai/types/api_model.py b/src/gradientai/types/api_model.py new file mode 100644 index 00000000..d680a638 --- /dev/null +++ b/src/gradientai/types/api_model.py @@ -0,0 +1,57 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel +from .api_agreement import APIAgreement +from .api_model_version import APIModelVersion + +__all__ = ["APIModel"] + + +class APIModel(BaseModel): + agreement: Optional[APIAgreement] = None + + created_at: Optional[datetime] = None + + inference_name: Optional[str] = None + + inference_version: Optional[str] = None + + is_foundational: Optional[bool] = None + + metadata: Optional[object] = None + + name: Optional[str] = None + + parent_uuid: Optional[str] = None + + provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( + None + ) + + updated_at: Optional[datetime] = None + + upload_complete: Optional[bool] = None + + url: Optional[str] = None + + usecases: Optional[ + List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + ] = None + + uuid: Optional[str] = None + + version: Optional[APIModelVersion] = None diff --git a/src/gradientai/types/api_model_version.py b/src/gradientai/types/api_model_version.py new file mode 100644 index 00000000..2e118632 --- /dev/null +++ b/src/gradientai/types/api_model_version.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from .._models import BaseModel + +__all__ = ["APIModelVersion"] + + +class APIModelVersion(BaseModel): + major: Optional[int] = None + + minor: Optional[int] = None + + patch: Optional[int] = None diff --git a/src/gradientai/types/api_openai_api_key_info.py b/src/gradientai/types/api_openai_api_key_info.py new file mode 100644 index 00000000..39328f80 --- /dev/null +++ b/src/gradientai/types/api_openai_api_key_info.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime + +from .._models import BaseModel +from .api_model import APIModel + +__all__ = ["APIOpenAIAPIKeyInfo"] + + +class APIOpenAIAPIKeyInfo(BaseModel): + created_at: Optional[datetime] = None + + created_by: Optional[str] = None + + deleted_at: Optional[datetime] = None + + models: Optional[List[APIModel]] = None + + name: Optional[str] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py new file mode 100644 index 00000000..d88d4791 --- /dev/null +++ b/tests/api_resources/test_agents.py @@ -0,0 +1,188 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types import AgentListResponse, AgentCreateResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestAgents: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + agent = client.agents.create() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + agent = client.agents.create( + anthropic_key_uuid="anthropic_key_uuid", + description="description", + instruction="instruction", + knowledge_base_uuid=["string"], + model_uuid="model_uuid", + name="name", + openai_key_uuid="open_ai_key_uuid", + project_id="project_id", + region="region", + tags=["string"], + ) + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.agents.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.agents.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + 
@parametrize + def test_method_list(self, client: GradientAI) -> None: + agent = client.agents.list() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + agent = client.agents.list( + only_deployed=True, + page=0, + per_page=0, + ) + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.agents.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.agents.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncAgents: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.create() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.create( + anthropic_key_uuid="anthropic_key_uuid", + description="description", + instruction="instruction", + knowledge_base_uuid=["string"], + model_uuid="model_uuid", + name="name", + openai_key_uuid="open_ai_key_uuid", + 
project_id="project_id", + region="region", + tags=["string"], + ) + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.list() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.list( + only_deployed=True, + page=0, + per_page=0, + ) + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True From 6254ccf45cbe50ca8191c7149824964f5d00d82f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 14 Jun 2025 05:14:22 +0000 Subject: [PATCH 05/41] feat(api): update via SDK Studio --- .stats.yml | 6 +- api.md | 13 + src/gradientai/resources/agents/__init__.py | 14 + src/gradientai/resources/agents/agents.py | 32 ++ src/gradientai/resources/agents/api_keys.py | 298 ++++++++++++++++++ src/gradientai/types/agents/__init__.py | 4 + .../types/agents/api_key_create_params.py | 15 + .../types/agents/api_key_create_response.py | 12 + .../types/agents/api_key_list_params.py | 15 + .../types/agents/api_key_list_response.py | 18 ++ tests/api_resources/agents/test_api_keys.py | 230 ++++++++++++++ 11 files changed, 654 insertions(+), 3 deletions(-) create mode 100644 src/gradientai/resources/agents/api_keys.py create mode 100644 src/gradientai/types/agents/api_key_create_params.py create mode 100644 src/gradientai/types/agents/api_key_create_response.py create mode 100644 src/gradientai/types/agents/api_key_list_params.py create mode 100644 src/gradientai/types/agents/api_key_list_response.py create mode 100644 tests/api_resources/agents/test_api_keys.py diff --git a/.stats.yml b/.stats.yml index 1a1a584d..e46abea5 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 8 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-2b5d132a76e51849a4fdbb2da2818132d1f8208f137acb86ee71e4a5c130154e.yml -openapi_spec_hash: 
6c13968b99ef16b717854a096b6ca506 +configured_endpoints: 10 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-dfc4c90814a9503f4796d2b0ac258becf67a135292bd57d55545430bbc125770.yml +openapi_spec_hash: 55413c66920b0f073f598043822addb5 config_hash: 69dc66269416b2e01e8852b5a6788b97 diff --git a/api.md b/api.md index d39072be..88ff5fa1 100644 --- a/api.md +++ b/api.md @@ -21,6 +21,19 @@ Methods: - client.agents.create(\*\*params) -> AgentCreateResponse - client.agents.list(\*\*params) -> AgentListResponse +## APIKeys + +Types: + +```python +from gradientai.types.agents import APIKeyCreateResponse, APIKeyListResponse +``` + +Methods: + +- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse +- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse + ## Versions Types: diff --git a/src/gradientai/resources/agents/__init__.py b/src/gradientai/resources/agents/__init__.py index 2ae2658b..a4d7d576 100644 --- a/src/gradientai/resources/agents/__init__.py +++ b/src/gradientai/resources/agents/__init__.py @@ -8,6 +8,14 @@ AgentsResourceWithStreamingResponse, AsyncAgentsResourceWithStreamingResponse, ) +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) from .versions import ( VersionsResource, AsyncVersionsResource, @@ -18,6 +26,12 @@ ) __all__ = [ + "APIKeysResource", + "AsyncAPIKeysResource", + "APIKeysResourceWithRawResponse", + "AsyncAPIKeysResourceWithRawResponse", + "APIKeysResourceWithStreamingResponse", + "AsyncAPIKeysResourceWithStreamingResponse", "VersionsResource", "AsyncVersionsResource", "VersionsResourceWithRawResponse", diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index b42dc03c..30ae68da 100644 --- 
a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -9,6 +9,14 @@ from ...types import agent_list_params, agent_create_params from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ..._utils import maybe_transform, async_maybe_transform +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) from .versions import ( VersionsResource, AsyncVersionsResource, @@ -33,6 +41,10 @@ class AgentsResource(SyncAPIResource): + @cached_property + def api_keys(self) -> APIKeysResource: + return APIKeysResource(self._client) + @cached_property def versions(self) -> VersionsResource: return VersionsResource(self._client) @@ -172,6 +184,10 @@ def list( class AsyncAgentsResource(AsyncAPIResource): + @cached_property + def api_keys(self) -> AsyncAPIKeysResource: + return AsyncAPIKeysResource(self._client) + @cached_property def versions(self) -> AsyncVersionsResource: return AsyncVersionsResource(self._client) @@ -321,6 +337,10 @@ def __init__(self, agents: AgentsResource) -> None: agents.list, ) + @cached_property + def api_keys(self) -> APIKeysResourceWithRawResponse: + return APIKeysResourceWithRawResponse(self._agents.api_keys) + @cached_property def versions(self) -> VersionsResourceWithRawResponse: return VersionsResourceWithRawResponse(self._agents.versions) @@ -337,6 +357,10 @@ def __init__(self, agents: AsyncAgentsResource) -> None: agents.list, ) + @cached_property + def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: + return AsyncAPIKeysResourceWithRawResponse(self._agents.api_keys) + @cached_property def versions(self) -> AsyncVersionsResourceWithRawResponse: return AsyncVersionsResourceWithRawResponse(self._agents.versions) @@ -353,6 +377,10 @@ def __init__(self, agents: AgentsResource) -> None: agents.list, ) + @cached_property + def 
api_keys(self) -> APIKeysResourceWithStreamingResponse: + return APIKeysResourceWithStreamingResponse(self._agents.api_keys) + @cached_property def versions(self) -> VersionsResourceWithStreamingResponse: return VersionsResourceWithStreamingResponse(self._agents.versions) @@ -369,6 +397,10 @@ def __init__(self, agents: AsyncAgentsResource) -> None: agents.list, ) + @cached_property + def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: + return AsyncAPIKeysResourceWithStreamingResponse(self._agents.api_keys) + @cached_property def versions(self) -> AsyncVersionsResourceWithStreamingResponse: return AsyncVersionsResourceWithStreamingResponse(self._agents.versions) diff --git a/src/gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py new file mode 100644 index 00000000..ad021d34 --- /dev/null +++ b/src/gradientai/resources/agents/api_keys.py @@ -0,0 +1,298 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.agents import api_key_list_params, api_key_create_params +from ...types.agents.api_key_list_response import APIKeyListResponse +from ...types.agents.api_key_create_response import APIKeyCreateResponse + +__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] + + +class APIKeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> APIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return APIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return APIKeysResourceWithStreamingResponse(self) + + def create( + self, + path_agent_uuid: str, + *, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyCreateResponse: + """ + To create an agent API key, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + return self._post( + f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys", + body=maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "name": name, + }, + api_key_create_params.APIKeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyCreateResponse, + ) + + def list( + self, + agent_uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyListResponse: + """ + To list all agent API keys, send a GET request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + return self._get( + f"/v2/gen-ai/agents/{agent_uuid}/api_keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + api_key_list_params.APIKeyListParams, + ), + ), + cast_to=APIKeyListResponse, + ) + + +class AsyncAPIKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncAPIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncAPIKeysResourceWithStreamingResponse(self) + + async def create( + self, + path_agent_uuid: str, + *, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyCreateResponse: + """ + To create an agent API key, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + return await self._post( + f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys", + body=await async_maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "name": name, + }, + api_key_create_params.APIKeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyCreateResponse, + ) + + async def list( + self, + agent_uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyListResponse: + """ + To list all agent API keys, send a GET request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + return await self._get( + f"/v2/gen-ai/agents/{agent_uuid}/api_keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + api_key_list_params.APIKeyListParams, + ), + ), + cast_to=APIKeyListResponse, + ) + + +class APIKeysResourceWithRawResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = api_keys + + self.create = to_raw_response_wrapper( + api_keys.create, + ) + self.list = to_raw_response_wrapper( + api_keys.list, + ) + + +class AsyncAPIKeysResourceWithRawResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys + + self.create = async_to_raw_response_wrapper( + api_keys.create, + ) + self.list = async_to_raw_response_wrapper( + api_keys.list, + ) + + +class APIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = api_keys + + self.create = to_streamed_response_wrapper( + api_keys.create, + ) + self.list = to_streamed_response_wrapper( + api_keys.list, + ) + + +class AsyncAPIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys + + self.create = async_to_streamed_response_wrapper( + api_keys.create, + ) + self.list = async_to_streamed_response_wrapper( + api_keys.list, + ) diff --git a/src/gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py index fdee8834..f07f4bfd 100644 --- 
a/src/gradientai/types/agents/__init__.py +++ b/src/gradientai/types/agents/__init__.py @@ -4,7 +4,11 @@ from .api_meta import APIMeta as APIMeta from .api_links import APILinks as APILinks +from .api_key_list_params import APIKeyListParams as APIKeyListParams from .version_list_params import VersionListParams as VersionListParams +from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams +from .api_key_list_response import APIKeyListResponse as APIKeyListResponse from .version_list_response import VersionListResponse as VersionListResponse from .version_update_params import VersionUpdateParams as VersionUpdateParams +from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse from .version_update_response import VersionUpdateResponse as VersionUpdateResponse diff --git a/src/gradientai/types/agents/api_key_create_params.py b/src/gradientai/types/agents/api_key_create_params.py new file mode 100644 index 00000000..c3fc44cd --- /dev/null +++ b/src/gradientai/types/agents/api_key_create_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["APIKeyCreateParams"] + + +class APIKeyCreateParams(TypedDict, total=False): + body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + + name: str diff --git a/src/gradientai/types/agents/api_key_create_response.py b/src/gradientai/types/agents/api_key_create_response.py new file mode 100644 index 00000000..09689fe7 --- /dev/null +++ b/src/gradientai/types/agents/api_key_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel +from ..api_agent_api_key_info import APIAgentAPIKeyInfo + +__all__ = ["APIKeyCreateResponse"] + + +class APIKeyCreateResponse(BaseModel): + api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/gradientai/types/agents/api_key_list_params.py b/src/gradientai/types/agents/api_key_list_params.py new file mode 100644 index 00000000..11da9398 --- /dev/null +++ b/src/gradientai/types/agents/api_key_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["APIKeyListParams"] + + +class APIKeyListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/gradientai/types/agents/api_key_list_response.py b/src/gradientai/types/agents/api_key_list_response.py new file mode 100644 index 00000000..eff98649 --- /dev/null +++ b/src/gradientai/types/agents/api_key_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from .api_meta import APIMeta +from ..._models import BaseModel +from .api_links import APILinks +from ..api_agent_api_key_info import APIAgentAPIKeyInfo + +__all__ = ["APIKeyListResponse"] + + +class APIKeyListResponse(BaseModel): + api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py new file mode 100644 index 00000000..135fdb21 --- /dev/null +++ b/tests/api_resources/agents/test_api_keys.py @@ -0,0 +1,230 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.agents import APIKeyListResponse, APIKeyCreateResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestAPIKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + api_key = client.agents.api_keys.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + api_key = client.agents.api_keys.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + name="name", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.agents.api_keys.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.agents.api_keys.with_streaming_response.create( + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def 
test_path_params_create(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + client.agents.api_keys.with_raw_response.create( + path_agent_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + api_key = client.agents.api_keys.list( + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + api_key = client.agents.api_keys.list( + agent_uuid="agent_uuid", + page=0, + per_page=0, + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.agents.api_keys.with_raw_response.list( + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.agents.api_keys.with_streaming_response.list( + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.agents.api_keys.with_raw_response.list( + agent_uuid="", + ) + + +class TestAsyncAPIKeys: + parametrize = pytest.mark.parametrize("async_client", 
[False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.agents.api_keys.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.agents.api_keys.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + name="name", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.api_keys.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.api_keys.with_streaming_response.create( + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + await async_client.agents.api_keys.with_raw_response.create( + path_agent_uuid="", + ) + + @pytest.mark.skip() + 
@parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.agents.api_keys.list( + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.agents.api_keys.list( + agent_uuid="agent_uuid", + page=0, + per_page=0, + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.api_keys.with_raw_response.list( + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.api_keys.with_streaming_response.list( + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.agents.api_keys.with_raw_response.list( + agent_uuid="", + ) From 8bd264b4b4686ca078bf4eb4b5462f058406df3e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 14 Jun 2025 05:14:46 +0000 
Subject: [PATCH 06/41] feat(api): update via SDK Studio --- .stats.yml | 6 +- api.md | 9 +- src/gradientai/resources/agents/api_keys.py | 198 ++++++++++++++- src/gradientai/types/agents/__init__.py | 3 + .../types/agents/api_key_delete_response.py | 12 + .../types/agents/api_key_update_params.py | 19 ++ .../types/agents/api_key_update_response.py | 12 + tests/api_resources/agents/test_api_keys.py | 239 +++++++++++++++++- 8 files changed, 492 insertions(+), 6 deletions(-) create mode 100644 src/gradientai/types/agents/api_key_delete_response.py create mode 100644 src/gradientai/types/agents/api_key_update_params.py create mode 100644 src/gradientai/types/agents/api_key_update_response.py diff --git a/.stats.yml b/.stats.yml index e46abea5..b93ec388 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 10 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-dfc4c90814a9503f4796d2b0ac258becf67a135292bd57d55545430bbc125770.yml -openapi_spec_hash: 55413c66920b0f073f598043822addb5 +configured_endpoints: 12 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-2bbf73b1efbb5271e264d160d4d802781d18b94df56050565fb0579ba06147bd.yml +openapi_spec_hash: 40cced684005d4713404e1c77f0d194f config_hash: 69dc66269416b2e01e8852b5a6788b97 diff --git a/api.md b/api.md index 88ff5fa1..79cfe708 100644 --- a/api.md +++ b/api.md @@ -26,13 +26,20 @@ Methods: Types: ```python -from gradientai.types.agents import APIKeyCreateResponse, APIKeyListResponse +from gradientai.types.agents import ( + APIKeyCreateResponse, + APIKeyUpdateResponse, + APIKeyListResponse, + APIKeyDeleteResponse, +) ``` Methods: - client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse +- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse - client.agents.api_keys.list(agent_uuid, \*\*params) -> 
APIKeyListResponse +- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse ## Versions diff --git a/src/gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py index ad021d34..51fe4866 100644 --- a/src/gradientai/resources/agents/api_keys.py +++ b/src/gradientai/resources/agents/api_keys.py @@ -15,9 +15,11 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.agents import api_key_list_params, api_key_create_params +from ...types.agents import api_key_list_params, api_key_create_params, api_key_update_params from ...types.agents.api_key_list_response import APIKeyListResponse from ...types.agents.api_key_create_response import APIKeyCreateResponse +from ...types.agents.api_key_delete_response import APIKeyDeleteResponse +from ...types.agents.api_key_update_response import APIKeyUpdateResponse __all__ = ["APIKeysResource", "AsyncAPIKeysResource"] @@ -85,6 +87,54 @@ def create( cast_to=APIKeyCreateResponse, ) + def update( + self, + path_api_key_uuid: str, + *, + path_agent_uuid: str, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateResponse: + """ + To update an agent API key, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return self._put( + f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}", + body=maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + api_key_update_params.APIKeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateResponse, + ) + def list( self, agent_uuid: str, @@ -135,6 +185,43 @@ def list( cast_to=APIKeyListResponse, ) + def delete( + self, + api_key_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyDeleteResponse: + """ + To delete an API key for an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._delete( + f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyDeleteResponse, + ) + class AsyncAPIKeysResource(AsyncAPIResource): @cached_property @@ -199,6 +286,54 @@ async def create( cast_to=APIKeyCreateResponse, ) + async def update( + self, + path_api_key_uuid: str, + *, + path_agent_uuid: str, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateResponse: + """ + To update an agent API key, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return await self._put( + f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}", + body=await async_maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + api_key_update_params.APIKeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateResponse, + ) + async def list( self, agent_uuid: str, @@ -249,6 +384,43 @@ async def list( cast_to=APIKeyListResponse, ) + async def delete( + self, + api_key_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyDeleteResponse: + """ + To delete an API key for an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._delete( + f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyDeleteResponse, + ) + class APIKeysResourceWithRawResponse: def __init__(self, api_keys: APIKeysResource) -> None: @@ -257,9 +429,15 @@ def __init__(self, api_keys: APIKeysResource) -> None: self.create = to_raw_response_wrapper( api_keys.create, ) + self.update = to_raw_response_wrapper( + api_keys.update, + ) self.list = to_raw_response_wrapper( api_keys.list, ) + self.delete = to_raw_response_wrapper( + api_keys.delete, + ) class AsyncAPIKeysResourceWithRawResponse: @@ -269,9 +447,15 @@ def __init__(self, api_keys: AsyncAPIKeysResource) -> None: self.create = async_to_raw_response_wrapper( api_keys.create, ) + self.update = async_to_raw_response_wrapper( + api_keys.update, + ) self.list = async_to_raw_response_wrapper( api_keys.list, ) + self.delete = async_to_raw_response_wrapper( + api_keys.delete, + ) class APIKeysResourceWithStreamingResponse: @@ -281,9 +465,15 @@ def __init__(self, api_keys: APIKeysResource) -> None: self.create = to_streamed_response_wrapper( api_keys.create, ) + self.update = to_streamed_response_wrapper( + api_keys.update, + ) self.list = to_streamed_response_wrapper( api_keys.list, ) + self.delete = to_streamed_response_wrapper( + api_keys.delete, + ) class AsyncAPIKeysResourceWithStreamingResponse: @@ -293,6 
+483,12 @@ def __init__(self, api_keys: AsyncAPIKeysResource) -> None: self.create = async_to_streamed_response_wrapper( api_keys.create, ) + self.update = async_to_streamed_response_wrapper( + api_keys.update, + ) self.list = async_to_streamed_response_wrapper( api_keys.list, ) + self.delete = async_to_streamed_response_wrapper( + api_keys.delete, + ) diff --git a/src/gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py index f07f4bfd..5bb6e6a9 100644 --- a/src/gradientai/types/agents/__init__.py +++ b/src/gradientai/types/agents/__init__.py @@ -8,7 +8,10 @@ from .version_list_params import VersionListParams as VersionListParams from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams from .api_key_list_response import APIKeyListResponse as APIKeyListResponse +from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams from .version_list_response import VersionListResponse as VersionListResponse from .version_update_params import VersionUpdateParams as VersionUpdateParams from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse +from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse +from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse from .version_update_response import VersionUpdateResponse as VersionUpdateResponse diff --git a/src/gradientai/types/agents/api_key_delete_response.py b/src/gradientai/types/agents/api_key_delete_response.py new file mode 100644 index 00000000..02b03f61 --- /dev/null +++ b/src/gradientai/types/agents/api_key_delete_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel +from ..api_agent_api_key_info import APIAgentAPIKeyInfo + +__all__ = ["APIKeyDeleteResponse"] + + +class APIKeyDeleteResponse(BaseModel): + api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/gradientai/types/agents/api_key_update_params.py b/src/gradientai/types/agents/api_key_update_params.py new file mode 100644 index 00000000..b49ebb38 --- /dev/null +++ b/src/gradientai/types/agents/api_key_update_params.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["APIKeyUpdateParams"] + + +class APIKeyUpdateParams(TypedDict, total=False): + path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]] + + body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + + body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + + name: str diff --git a/src/gradientai/types/agents/api_key_update_response.py b/src/gradientai/types/agents/api_key_update_response.py new file mode 100644 index 00000000..87442329 --- /dev/null +++ b/src/gradientai/types/agents/api_key_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel +from ..api_agent_api_key_info import APIAgentAPIKeyInfo + +__all__ = ["APIKeyUpdateResponse"] + + +class APIKeyUpdateResponse(BaseModel): + api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py index 135fdb21..3eb348a7 100644 --- a/tests/api_resources/agents/test_api_keys.py +++ b/tests/api_resources/agents/test_api_keys.py @@ -9,7 +9,12 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import APIKeyListResponse, APIKeyCreateResponse +from gradientai.types.agents import ( + APIKeyListResponse, + APIKeyCreateResponse, + APIKeyDeleteResponse, + APIKeyUpdateResponse, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -69,6 +74,70 @@ def test_path_params_create(self, client: GradientAI) -> None: path_agent_uuid="", ) + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + api_key = client.agents.api_keys.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + api_key = client.agents.api_keys.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.agents.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.agents.api_keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + client.agents.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + client.agents.api_keys.with_raw_response.update( + path_api_key_uuid="", + path_agent_uuid="agent_uuid", + ) + @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: @@ -121,6 +190,58 @@ def test_path_params_list(self, client: GradientAI) -> None: agent_uuid="", ) + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + api_key = client.agents.api_keys.delete( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.agents.api_keys.with_raw_response.delete( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.agents.api_keys.with_streaming_response.delete( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.agents.api_keys.with_raw_response.delete( + api_key_uuid="api_key_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.agents.api_keys.with_raw_response.delete( + api_key_uuid="", + agent_uuid="agent_uuid", + ) + class TestAsyncAPIKeys: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -177,6 +298,70 @@ async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: path_agent_uuid="", ) + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.agents.api_keys.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.agents.api_keys.update( + path_api_key_uuid="api_key_uuid", + 
path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.api_keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + await async_client.agents.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + path_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.agents.api_keys.with_raw_response.update( + path_api_key_uuid="", + path_agent_uuid="agent_uuid", + ) + @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: @@ -228,3 +413,55 @@ async def test_path_params_list(self, 
async_client: AsyncGradientAI) -> None: await async_client.agents.api_keys.with_raw_response.list( agent_uuid="", ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.agents.api_keys.delete( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.api_keys.with_raw_response.delete( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.api_keys.with_streaming_response.delete( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.agents.api_keys.with_raw_response.delete( + api_key_uuid="api_key_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.agents.api_keys.with_raw_response.delete( + api_key_uuid="", + agent_uuid="agent_uuid", + 
) From e75adfbd2d035e57ae110a1d78ea40fb116975e5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 14 Jun 2025 05:15:14 +0000 Subject: [PATCH 07/41] feat(api): update via SDK Studio --- .stats.yml | 6 +- api.md | 14 ++ src/gradientai/resources/agents/__init__.py | 14 ++ src/gradientai/resources/agents/agents.py | 32 +++ src/gradientai/resources/agents/api_keys.py | 87 ++++++++ src/gradientai/resources/agents/functions.py | 205 ++++++++++++++++++ src/gradientai/types/agents/__init__.py | 3 + .../agents/api_key_regenerate_response.py | 12 + .../types/agents/function_create_params.py | 25 +++ .../types/agents/function_create_response.py | 16 ++ tests/api_resources/agents/test_api_keys.py | 105 +++++++++ tests/api_resources/agents/test_functions.py | 136 ++++++++++++ 12 files changed, 652 insertions(+), 3 deletions(-) create mode 100644 src/gradientai/resources/agents/functions.py create mode 100644 src/gradientai/types/agents/api_key_regenerate_response.py create mode 100644 src/gradientai/types/agents/function_create_params.py create mode 100644 src/gradientai/types/agents/function_create_response.py create mode 100644 tests/api_resources/agents/test_functions.py diff --git a/.stats.yml b/.stats.yml index b93ec388..74e07701 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 12 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-2bbf73b1efbb5271e264d160d4d802781d18b94df56050565fb0579ba06147bd.yml -openapi_spec_hash: 40cced684005d4713404e1c77f0d194f +configured_endpoints: 14 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-a98eb68f96d2983dda152d72f9dfe3722ac5dcb60759328fe72858d4e3d16821.yml +openapi_spec_hash: 57506039c91b1054fdd65fe84988f1f0 config_hash: 69dc66269416b2e01e8852b5a6788b97 diff --git a/api.md b/api.md index 79cfe708..cead5153 
100644 --- a/api.md +++ b/api.md @@ -31,6 +31,7 @@ from gradientai.types.agents import ( APIKeyUpdateResponse, APIKeyListResponse, APIKeyDeleteResponse, + APIKeyRegenerateResponse, ) ``` @@ -40,6 +41,19 @@ Methods: - client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse - client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse - client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse +- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse + +## Functions + +Types: + +```python +from gradientai.types.agents import FunctionCreateResponse +``` + +Methods: + +- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse ## Versions diff --git a/src/gradientai/resources/agents/__init__.py b/src/gradientai/resources/agents/__init__.py index a4d7d576..5502b6f2 100644 --- a/src/gradientai/resources/agents/__init__.py +++ b/src/gradientai/resources/agents/__init__.py @@ -24,6 +24,14 @@ VersionsResourceWithStreamingResponse, AsyncVersionsResourceWithStreamingResponse, ) +from .functions import ( + FunctionsResource, + AsyncFunctionsResource, + FunctionsResourceWithRawResponse, + AsyncFunctionsResourceWithRawResponse, + FunctionsResourceWithStreamingResponse, + AsyncFunctionsResourceWithStreamingResponse, +) __all__ = [ "APIKeysResource", @@ -32,6 +40,12 @@ "AsyncAPIKeysResourceWithRawResponse", "APIKeysResourceWithStreamingResponse", "AsyncAPIKeysResourceWithStreamingResponse", + "FunctionsResource", + "AsyncFunctionsResource", + "FunctionsResourceWithRawResponse", + "AsyncFunctionsResourceWithRawResponse", + "FunctionsResourceWithStreamingResponse", + "AsyncFunctionsResourceWithStreamingResponse", "VersionsResource", "AsyncVersionsResource", "VersionsResourceWithRawResponse", diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index 30ae68da..f4490cef 100644 --- 
a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -26,6 +26,14 @@ AsyncVersionsResourceWithStreamingResponse, ) from ..._compat import cached_property +from .functions import ( + FunctionsResource, + AsyncFunctionsResource, + FunctionsResourceWithRawResponse, + AsyncFunctionsResourceWithRawResponse, + FunctionsResourceWithStreamingResponse, + AsyncFunctionsResourceWithStreamingResponse, +) from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import ( to_raw_response_wrapper, @@ -45,6 +53,10 @@ class AgentsResource(SyncAPIResource): def api_keys(self) -> APIKeysResource: return APIKeysResource(self._client) + @cached_property + def functions(self) -> FunctionsResource: + return FunctionsResource(self._client) + @cached_property def versions(self) -> VersionsResource: return VersionsResource(self._client) @@ -188,6 +200,10 @@ class AsyncAgentsResource(AsyncAPIResource): def api_keys(self) -> AsyncAPIKeysResource: return AsyncAPIKeysResource(self._client) + @cached_property + def functions(self) -> AsyncFunctionsResource: + return AsyncFunctionsResource(self._client) + @cached_property def versions(self) -> AsyncVersionsResource: return AsyncVersionsResource(self._client) @@ -341,6 +357,10 @@ def __init__(self, agents: AgentsResource) -> None: def api_keys(self) -> APIKeysResourceWithRawResponse: return APIKeysResourceWithRawResponse(self._agents.api_keys) + @cached_property + def functions(self) -> FunctionsResourceWithRawResponse: + return FunctionsResourceWithRawResponse(self._agents.functions) + @cached_property def versions(self) -> VersionsResourceWithRawResponse: return VersionsResourceWithRawResponse(self._agents.versions) @@ -361,6 +381,10 @@ def __init__(self, agents: AsyncAgentsResource) -> None: def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: return AsyncAPIKeysResourceWithRawResponse(self._agents.api_keys) + @cached_property + def functions(self) -> 
AsyncFunctionsResourceWithRawResponse: + return AsyncFunctionsResourceWithRawResponse(self._agents.functions) + @cached_property def versions(self) -> AsyncVersionsResourceWithRawResponse: return AsyncVersionsResourceWithRawResponse(self._agents.versions) @@ -381,6 +405,10 @@ def __init__(self, agents: AgentsResource) -> None: def api_keys(self) -> APIKeysResourceWithStreamingResponse: return APIKeysResourceWithStreamingResponse(self._agents.api_keys) + @cached_property + def functions(self) -> FunctionsResourceWithStreamingResponse: + return FunctionsResourceWithStreamingResponse(self._agents.functions) + @cached_property def versions(self) -> VersionsResourceWithStreamingResponse: return VersionsResourceWithStreamingResponse(self._agents.versions) @@ -401,6 +429,10 @@ def __init__(self, agents: AsyncAgentsResource) -> None: def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: return AsyncAPIKeysResourceWithStreamingResponse(self._agents.api_keys) + @cached_property + def functions(self) -> AsyncFunctionsResourceWithStreamingResponse: + return AsyncFunctionsResourceWithStreamingResponse(self._agents.functions) + @cached_property def versions(self) -> AsyncVersionsResourceWithStreamingResponse: return AsyncVersionsResourceWithStreamingResponse(self._agents.versions) diff --git a/src/gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py index 51fe4866..7180503f 100644 --- a/src/gradientai/resources/agents/api_keys.py +++ b/src/gradientai/resources/agents/api_keys.py @@ -20,6 +20,7 @@ from ...types.agents.api_key_create_response import APIKeyCreateResponse from ...types.agents.api_key_delete_response import APIKeyDeleteResponse from ...types.agents.api_key_update_response import APIKeyUpdateResponse +from ...types.agents.api_key_regenerate_response import APIKeyRegenerateResponse __all__ = ["APIKeysResource", "AsyncAPIKeysResource"] @@ -222,6 +223,43 @@ def delete( cast_to=APIKeyDeleteResponse, ) + def regenerate( + self, + 
api_key_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyRegenerateResponse: + """ + To regenerate an agent API key, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._put( + f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyRegenerateResponse, + ) + class AsyncAPIKeysResource(AsyncAPIResource): @cached_property @@ -421,6 +459,43 @@ async def delete( cast_to=APIKeyDeleteResponse, ) + async def regenerate( + self, + api_key_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyRegenerateResponse: + """ + To regenerate an agent API key, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._put( + f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyRegenerateResponse, + ) + class APIKeysResourceWithRawResponse: def __init__(self, api_keys: APIKeysResource) -> None: @@ -438,6 +513,9 @@ def __init__(self, api_keys: APIKeysResource) -> None: self.delete = to_raw_response_wrapper( api_keys.delete, ) + self.regenerate = to_raw_response_wrapper( + api_keys.regenerate, + ) class AsyncAPIKeysResourceWithRawResponse: @@ -456,6 +534,9 @@ def __init__(self, api_keys: AsyncAPIKeysResource) -> None: self.delete = async_to_raw_response_wrapper( api_keys.delete, ) + self.regenerate = async_to_raw_response_wrapper( + api_keys.regenerate, + ) class APIKeysResourceWithStreamingResponse: @@ -474,6 +555,9 @@ def __init__(self, api_keys: APIKeysResource) -> None: self.delete = to_streamed_response_wrapper( api_keys.delete, ) + self.regenerate = to_streamed_response_wrapper( + api_keys.regenerate, + ) class AsyncAPIKeysResourceWithStreamingResponse: @@ -492,3 +576,6 
@@ def __init__(self, api_keys: AsyncAPIKeysResource) -> None: self.delete = async_to_streamed_response_wrapper( api_keys.delete, ) + self.regenerate = async_to_streamed_response_wrapper( + api_keys.regenerate, + ) diff --git a/src/gradientai/resources/agents/functions.py b/src/gradientai/resources/agents/functions.py new file mode 100644 index 00000000..010c0c2c --- /dev/null +++ b/src/gradientai/resources/agents/functions.py @@ -0,0 +1,205 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.agents import function_create_params +from ...types.agents.function_create_response import FunctionCreateResponse + +__all__ = ["FunctionsResource", "AsyncFunctionsResource"] + + +class FunctionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> FunctionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return FunctionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> FunctionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return FunctionsResourceWithStreamingResponse(self) + + def create( + self, + path_agent_uuid: str, + *, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + faas_name: str | NotGiven = NOT_GIVEN, + faas_namespace: str | NotGiven = NOT_GIVEN, + function_name: str | NotGiven = NOT_GIVEN, + input_schema: object | NotGiven = NOT_GIVEN, + output_schema: object | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionCreateResponse: + """ + To create a function route for an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/functions`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + return self._post( + f"/v2/gen-ai/agents/{path_agent_uuid}/functions", + body=maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "description": description, + "faas_name": faas_name, + "faas_namespace": faas_namespace, + "function_name": function_name, + "input_schema": input_schema, + "output_schema": output_schema, + }, + function_create_params.FunctionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionCreateResponse, + ) + + +class AsyncFunctionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncFunctionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncFunctionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncFunctionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncFunctionsResourceWithStreamingResponse(self) + + async def create( + self, + path_agent_uuid: str, + *, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + faas_name: str | NotGiven = NOT_GIVEN, + faas_namespace: str | NotGiven = NOT_GIVEN, + function_name: str | NotGiven = NOT_GIVEN, + input_schema: object | NotGiven = NOT_GIVEN, + output_schema: object | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionCreateResponse: + """ + To create a function route for an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/functions`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + return await self._post( + f"/v2/gen-ai/agents/{path_agent_uuid}/functions", + body=await async_maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "description": description, + "faas_name": faas_name, + "faas_namespace": faas_namespace, + "function_name": function_name, + "input_schema": input_schema, + "output_schema": output_schema, + }, + function_create_params.FunctionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionCreateResponse, + ) + + +class FunctionsResourceWithRawResponse: + def __init__(self, functions: FunctionsResource) -> None: + self._functions = functions + + self.create = to_raw_response_wrapper( + functions.create, + ) + + +class AsyncFunctionsResourceWithRawResponse: + def __init__(self, functions: AsyncFunctionsResource) -> None: + self._functions = functions + + self.create = async_to_raw_response_wrapper( + functions.create, + ) + + +class FunctionsResourceWithStreamingResponse: + def __init__(self, functions: FunctionsResource) -> None: + self._functions = functions + + self.create = to_streamed_response_wrapper( + functions.create, + ) + + +class AsyncFunctionsResourceWithStreamingResponse: + def __init__(self, functions: AsyncFunctionsResource) -> None: + self._functions = functions + + self.create = async_to_streamed_response_wrapper( + functions.create, + ) diff --git a/src/gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py index 5bb6e6a9..0ae9c73c 100644 --- 
a/src/gradientai/types/agents/__init__.py +++ b/src/gradientai/types/agents/__init__.py @@ -11,7 +11,10 @@ from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams from .version_list_response import VersionListResponse as VersionListResponse from .version_update_params import VersionUpdateParams as VersionUpdateParams +from .function_create_params import FunctionCreateParams as FunctionCreateParams from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse from .version_update_response import VersionUpdateResponse as VersionUpdateResponse +from .function_create_response import FunctionCreateResponse as FunctionCreateResponse +from .api_key_regenerate_response import APIKeyRegenerateResponse as APIKeyRegenerateResponse diff --git a/src/gradientai/types/agents/api_key_regenerate_response.py b/src/gradientai/types/agents/api_key_regenerate_response.py new file mode 100644 index 00000000..ea2f761e --- /dev/null +++ b/src/gradientai/types/agents/api_key_regenerate_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from ..api_agent_api_key_info import APIAgentAPIKeyInfo + +__all__ = ["APIKeyRegenerateResponse"] + + +class APIKeyRegenerateResponse(BaseModel): + api_key_info: Optional[APIAgentAPIKeyInfo] = None diff --git a/src/gradientai/types/agents/function_create_params.py b/src/gradientai/types/agents/function_create_params.py new file mode 100644 index 00000000..938fb1d5 --- /dev/null +++ b/src/gradientai/types/agents/function_create_params.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["FunctionCreateParams"] + + +class FunctionCreateParams(TypedDict, total=False): + body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + + description: str + + faas_name: str + + faas_namespace: str + + function_name: str + + input_schema: object + + output_schema: object diff --git a/src/gradientai/types/agents/function_create_response.py b/src/gradientai/types/agents/function_create_response.py new file mode 100644 index 00000000..82ab984b --- /dev/null +++ b/src/gradientai/types/agents/function_create_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["FunctionCreateResponse"] + + +class FunctionCreateResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from ..api_agent import APIAgent diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py index 3eb348a7..e8489258 100644 --- a/tests/api_resources/agents/test_api_keys.py +++ b/tests/api_resources/agents/test_api_keys.py @@ -14,6 +14,7 @@ APIKeyCreateResponse, APIKeyDeleteResponse, APIKeyUpdateResponse, + APIKeyRegenerateResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -242,6 +243,58 @@ def test_path_params_delete(self, client: GradientAI) -> None: agent_uuid="agent_uuid", ) + @pytest.mark.skip() + @parametrize + def test_method_regenerate(self, client: GradientAI) -> None: + api_key = client.agents.api_keys.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_regenerate(self, client: GradientAI) -> None: + response = 
client.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_regenerate(self, client: GradientAI) -> None: + with client.agents.api_keys.with_streaming_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_regenerate(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="", + agent_uuid="agent_uuid", + ) + class TestAsyncAPIKeys: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -465,3 +518,55 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: api_key_uuid="", agent_uuid="agent_uuid", ) + + @pytest.mark.skip() + @parametrize + async def test_method_regenerate(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.agents.api_keys.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.api_keys.with_streaming_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_regenerate(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="api_key_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.agents.api_keys.with_raw_response.regenerate( + api_key_uuid="", + agent_uuid="agent_uuid", + ) diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py new file mode 100644 index 00000000..cb98e0bd --- /dev/null +++ b/tests/api_resources/agents/test_functions.py @@ -0,0 +1,136 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.agents import FunctionCreateResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestFunctions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + function = client.agents.functions.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + function = client.agents.functions.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + description="description", + faas_name="faas_name", + faas_namespace="faas_namespace", + function_name="function_name", + input_schema={}, + output_schema={}, + ) + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.agents.functions.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = response.parse() + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.agents.functions.with_streaming_response.create( + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = response.parse() + 
assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + client.agents.functions.with_raw_response.create( + path_agent_uuid="", + ) + + +class TestAsyncFunctions: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + function = await async_client.agents.functions.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + function = await async_client.agents.functions.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + description="description", + faas_name="faas_name", + faas_namespace="faas_namespace", + function_name="function_name", + input_schema={}, + output_schema={}, + ) + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.functions.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = await response.parse() + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.functions.with_streaming_response.create( + 
path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = await response.parse() + assert_matches_type(FunctionCreateResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + await async_client.agents.functions.with_raw_response.create( + path_agent_uuid="", + ) From 0ccc62cb8ef387e0aaf6784db25d5f99a587e5da Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 14 Jun 2025 05:15:34 +0000 Subject: [PATCH 08/41] feat(api): update via SDK Studio --- .stats.yml | 6 +- api.md | 12 +- src/gradientai/resources/agents/__init__.py | 14 + src/gradientai/resources/agents/agents.py | 32 +++ src/gradientai/resources/agents/functions.py | 218 ++++++++++++++- .../resources/agents/knowledge_bases.py | 165 ++++++++++++ src/gradientai/types/agents/__init__.py | 4 + .../agents/api_link_knowledge_base_output.py | 16 ++ .../types/agents/function_delete_response.py | 16 ++ .../types/agents/function_update_params.py | 29 ++ .../types/agents/function_update_response.py | 16 ++ tests/api_resources/agents/test_functions.py | 248 +++++++++++++++++- .../agents/test_knowledge_bases.py | 106 ++++++++ 13 files changed, 876 insertions(+), 6 deletions(-) create mode 100644 src/gradientai/resources/agents/knowledge_bases.py create mode 100644 src/gradientai/types/agents/api_link_knowledge_base_output.py create mode 100644 src/gradientai/types/agents/function_delete_response.py create mode 100644 src/gradientai/types/agents/function_update_params.py create mode 100644 src/gradientai/types/agents/function_update_response.py create mode 100644 
tests/api_resources/agents/test_knowledge_bases.py diff --git a/.stats.yml b/.stats.yml index 74e07701..9743a688 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 14 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-a98eb68f96d2983dda152d72f9dfe3722ac5dcb60759328fe72858d4e3d16821.yml -openapi_spec_hash: 57506039c91b1054fdd65fe84988f1f0 +configured_endpoints: 17 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-4fb25ab4cb2a89b06ad5e071dba45405224808d3208aed937c231003ab6fc5f6.yml +openapi_spec_hash: c41014abe91e4f7205d503900cd31568 config_hash: 69dc66269416b2e01e8852b5a6788b97 diff --git a/api.md b/api.md index cead5153..6a2b6456 100644 --- a/api.md +++ b/api.md @@ -48,12 +48,18 @@ Methods: Types: ```python -from gradientai.types.agents import FunctionCreateResponse +from gradientai.types.agents import ( + FunctionCreateResponse, + FunctionUpdateResponse, + FunctionDeleteResponse, +) ``` Methods: - client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse +- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse +- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse ## Versions @@ -76,6 +82,10 @@ Types: from gradientai.types.agents import APILinkKnowledgeBaseOutput ``` +Methods: + +- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput + # IndexingJobs Types: diff --git a/src/gradientai/resources/agents/__init__.py b/src/gradientai/resources/agents/__init__.py index 5502b6f2..5bdea838 100644 --- a/src/gradientai/resources/agents/__init__.py +++ b/src/gradientai/resources/agents/__init__.py @@ -32,6 +32,14 @@ FunctionsResourceWithStreamingResponse, AsyncFunctionsResourceWithStreamingResponse, ) +from .knowledge_bases import ( + KnowledgeBasesResource, + 
AsyncKnowledgeBasesResource, + KnowledgeBasesResourceWithRawResponse, + AsyncKnowledgeBasesResourceWithRawResponse, + KnowledgeBasesResourceWithStreamingResponse, + AsyncKnowledgeBasesResourceWithStreamingResponse, +) __all__ = [ "APIKeysResource", @@ -52,6 +60,12 @@ "AsyncVersionsResourceWithRawResponse", "VersionsResourceWithStreamingResponse", "AsyncVersionsResourceWithStreamingResponse", + "KnowledgeBasesResource", + "AsyncKnowledgeBasesResource", + "KnowledgeBasesResourceWithRawResponse", + "AsyncKnowledgeBasesResourceWithRawResponse", + "KnowledgeBasesResourceWithStreamingResponse", + "AsyncKnowledgeBasesResourceWithStreamingResponse", "AgentsResource", "AsyncAgentsResource", "AgentsResourceWithRawResponse", diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index f4490cef..38444f9c 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -42,6 +42,14 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options +from .knowledge_bases import ( + KnowledgeBasesResource, + AsyncKnowledgeBasesResource, + KnowledgeBasesResourceWithRawResponse, + AsyncKnowledgeBasesResourceWithRawResponse, + KnowledgeBasesResourceWithStreamingResponse, + AsyncKnowledgeBasesResourceWithStreamingResponse, +) from ...types.agent_list_response import AgentListResponse from ...types.agent_create_response import AgentCreateResponse @@ -61,6 +69,10 @@ def functions(self) -> FunctionsResource: def versions(self) -> VersionsResource: return VersionsResource(self._client) + @cached_property + def knowledge_bases(self) -> KnowledgeBasesResource: + return KnowledgeBasesResource(self._client) + @cached_property def with_raw_response(self) -> AgentsResourceWithRawResponse: """ @@ -208,6 +220,10 @@ def functions(self) -> AsyncFunctionsResource: def versions(self) -> AsyncVersionsResource: return AsyncVersionsResource(self._client) + @cached_property + def 
knowledge_bases(self) -> AsyncKnowledgeBasesResource: + return AsyncKnowledgeBasesResource(self._client) + @cached_property def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: """ @@ -365,6 +381,10 @@ def functions(self) -> FunctionsResourceWithRawResponse: def versions(self) -> VersionsResourceWithRawResponse: return VersionsResourceWithRawResponse(self._agents.versions) + @cached_property + def knowledge_bases(self) -> KnowledgeBasesResourceWithRawResponse: + return KnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) + class AsyncAgentsResourceWithRawResponse: def __init__(self, agents: AsyncAgentsResource) -> None: @@ -389,6 +409,10 @@ def functions(self) -> AsyncFunctionsResourceWithRawResponse: def versions(self) -> AsyncVersionsResourceWithRawResponse: return AsyncVersionsResourceWithRawResponse(self._agents.versions) + @cached_property + def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithRawResponse: + return AsyncKnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) + class AgentsResourceWithStreamingResponse: def __init__(self, agents: AgentsResource) -> None: @@ -413,6 +437,10 @@ def functions(self) -> FunctionsResourceWithStreamingResponse: def versions(self) -> VersionsResourceWithStreamingResponse: return VersionsResourceWithStreamingResponse(self._agents.versions) + @cached_property + def knowledge_bases(self) -> KnowledgeBasesResourceWithStreamingResponse: + return KnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) + class AsyncAgentsResourceWithStreamingResponse: def __init__(self, agents: AsyncAgentsResource) -> None: @@ -436,3 +464,7 @@ def functions(self) -> AsyncFunctionsResourceWithStreamingResponse: @cached_property def versions(self) -> AsyncVersionsResourceWithStreamingResponse: return AsyncVersionsResourceWithStreamingResponse(self._agents.versions) + + @cached_property + def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: + return 
AsyncKnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) diff --git a/src/gradientai/resources/agents/functions.py b/src/gradientai/resources/agents/functions.py index 010c0c2c..6de9b141 100644 --- a/src/gradientai/resources/agents/functions.py +++ b/src/gradientai/resources/agents/functions.py @@ -15,8 +15,10 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.agents import function_create_params +from ...types.agents import function_create_params, function_update_params from ...types.agents.function_create_response import FunctionCreateResponse +from ...types.agents.function_delete_response import FunctionDeleteResponse +from ...types.agents.function_update_response import FunctionUpdateResponse __all__ = ["FunctionsResource", "AsyncFunctionsResource"] @@ -94,6 +96,101 @@ def create( cast_to=FunctionCreateResponse, ) + def update( + self, + path_function_uuid: str, + *, + path_agent_uuid: str, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + faas_name: str | NotGiven = NOT_GIVEN, + faas_namespace: str | NotGiven = NOT_GIVEN, + function_name: str | NotGiven = NOT_GIVEN, + body_function_uuid: str | NotGiven = NOT_GIVEN, + input_schema: object | NotGiven = NOT_GIVEN, + output_schema: object | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionUpdateResponse: + """ + To update the function route, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + if not path_function_uuid: + raise ValueError(f"Expected a non-empty value for `path_function_uuid` but received {path_function_uuid!r}") + return self._put( + f"/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}", + body=maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "description": description, + "faas_name": faas_name, + "faas_namespace": faas_namespace, + "function_name": function_name, + "body_function_uuid": body_function_uuid, + "input_schema": input_schema, + "output_schema": output_schema, + }, + function_update_params.FunctionUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionUpdateResponse, + ) + + def delete( + self, + function_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionDeleteResponse: + """ + To delete a function route from an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not function_uuid: + raise ValueError(f"Expected a non-empty value for `function_uuid` but received {function_uuid!r}") + return self._delete( + f"/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionDeleteResponse, + ) + class AsyncFunctionsResource(AsyncAPIResource): @cached_property @@ -168,6 +265,101 @@ async def create( cast_to=FunctionCreateResponse, ) + async def update( + self, + path_function_uuid: str, + *, + path_agent_uuid: str, + body_agent_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + faas_name: str | NotGiven = NOT_GIVEN, + faas_namespace: str | NotGiven = NOT_GIVEN, + function_name: str | NotGiven = NOT_GIVEN, + body_function_uuid: str | NotGiven = NOT_GIVEN, + input_schema: object | NotGiven = NOT_GIVEN, + output_schema: object | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionUpdateResponse: + """ + To update the function route, send a PUT request to + `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_agent_uuid: + raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") + if not path_function_uuid: + raise ValueError(f"Expected a non-empty value for `path_function_uuid` but received {path_function_uuid!r}") + return await self._put( + f"/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}", + body=await async_maybe_transform( + { + "body_agent_uuid": body_agent_uuid, + "description": description, + "faas_name": faas_name, + "faas_namespace": faas_namespace, + "function_name": function_name, + "body_function_uuid": body_function_uuid, + "input_schema": input_schema, + "output_schema": output_schema, + }, + function_update_params.FunctionUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionUpdateResponse, + ) + + async def delete( + self, + function_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FunctionDeleteResponse: + """ + To delete a function route from an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not function_uuid: + raise ValueError(f"Expected a non-empty value for `function_uuid` but received {function_uuid!r}") + return await self._delete( + f"/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FunctionDeleteResponse, + ) + class FunctionsResourceWithRawResponse: def __init__(self, functions: FunctionsResource) -> None: @@ -176,6 +368,12 @@ def __init__(self, functions: FunctionsResource) -> None: self.create = to_raw_response_wrapper( functions.create, ) + self.update = to_raw_response_wrapper( + functions.update, + ) + self.delete = to_raw_response_wrapper( + functions.delete, + ) class AsyncFunctionsResourceWithRawResponse: @@ -185,6 +383,12 @@ def __init__(self, functions: AsyncFunctionsResource) -> None: self.create = async_to_raw_response_wrapper( functions.create, ) + self.update = async_to_raw_response_wrapper( + functions.update, + ) + self.delete = async_to_raw_response_wrapper( + functions.delete, + ) class FunctionsResourceWithStreamingResponse: @@ -194,6 +398,12 @@ def __init__(self, functions: FunctionsResource) -> None: self.create = to_streamed_response_wrapper( functions.create, ) + self.update = to_streamed_response_wrapper( + functions.update, + ) + self.delete = to_streamed_response_wrapper( + functions.delete, + ) class AsyncFunctionsResourceWithStreamingResponse: @@ -203,3 +413,9 @@ def __init__(self, functions: AsyncFunctionsResource) -> None: self.create = async_to_streamed_response_wrapper( functions.create, 
) + self.update = async_to_streamed_response_wrapper( + functions.update, + ) + self.delete = async_to_streamed_response_wrapper( + functions.delete, + ) diff --git a/src/gradientai/resources/agents/knowledge_bases.py b/src/gradientai/resources/agents/knowledge_bases.py new file mode 100644 index 00000000..3d65228a --- /dev/null +++ b/src/gradientai/resources/agents/knowledge_bases.py @@ -0,0 +1,165 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.agents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput + +__all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"] + + +class KnowledgeBasesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return KnowledgeBasesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return KnowledgeBasesResourceWithStreamingResponse(self) + + def attach( + self, + agent_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APILinkKnowledgeBaseOutput: + """ + To attach knowledge bases to an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases` + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + return self._post( + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APILinkKnowledgeBaseOutput, + ) + + +class AsyncKnowledgeBasesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncKnowledgeBasesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncKnowledgeBasesResourceWithStreamingResponse(self) + + async def attach( + self, + agent_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APILinkKnowledgeBaseOutput: + """ + To attach knowledge bases to an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases` + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + return await self._post( + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APILinkKnowledgeBaseOutput, + ) + + +class KnowledgeBasesResourceWithRawResponse: + def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.attach = 
to_raw_response_wrapper( + knowledge_bases.attach, + ) + + +class AsyncKnowledgeBasesResourceWithRawResponse: + def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.attach = async_to_raw_response_wrapper( + knowledge_bases.attach, + ) + + +class KnowledgeBasesResourceWithStreamingResponse: + def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.attach = to_streamed_response_wrapper( + knowledge_bases.attach, + ) + + +class AsyncKnowledgeBasesResourceWithStreamingResponse: + def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.attach = async_to_streamed_response_wrapper( + knowledge_bases.attach, + ) diff --git a/src/gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py index 0ae9c73c..2a7a830e 100644 --- a/src/gradientai/types/agents/__init__.py +++ b/src/gradientai/types/agents/__init__.py @@ -12,9 +12,13 @@ from .version_list_response import VersionListResponse as VersionListResponse from .version_update_params import VersionUpdateParams as VersionUpdateParams from .function_create_params import FunctionCreateParams as FunctionCreateParams +from .function_update_params import FunctionUpdateParams as FunctionUpdateParams from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse from .version_update_response import VersionUpdateResponse as VersionUpdateResponse from .function_create_response import FunctionCreateResponse as FunctionCreateResponse +from .function_delete_response import FunctionDeleteResponse as FunctionDeleteResponse +from .function_update_response import FunctionUpdateResponse as FunctionUpdateResponse from 
.api_key_regenerate_response import APIKeyRegenerateResponse as APIKeyRegenerateResponse +from .api_link_knowledge_base_output import APILinkKnowledgeBaseOutput as APILinkKnowledgeBaseOutput diff --git a/src/gradientai/types/agents/api_link_knowledge_base_output.py b/src/gradientai/types/agents/api_link_knowledge_base_output.py new file mode 100644 index 00000000..a38f021b --- /dev/null +++ b/src/gradientai/types/agents/api_link_knowledge_base_output.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["APILinkKnowledgeBaseOutput"] + + +class APILinkKnowledgeBaseOutput(BaseModel): + agent: Optional["APIAgent"] = None + + +from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/function_delete_response.py b/src/gradientai/types/agents/function_delete_response.py new file mode 100644 index 00000000..678ef62d --- /dev/null +++ b/src/gradientai/types/agents/function_delete_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["FunctionDeleteResponse"] + + +class FunctionDeleteResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/function_update_params.py b/src/gradientai/types/agents/function_update_params.py new file mode 100644 index 00000000..2fa8e8f0 --- /dev/null +++ b/src/gradientai/types/agents/function_update_params.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["FunctionUpdateParams"] + + +class FunctionUpdateParams(TypedDict, total=False): + path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]] + + body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + + description: str + + faas_name: str + + faas_namespace: str + + function_name: str + + body_function_uuid: Annotated[str, PropertyInfo(alias="function_uuid")] + + input_schema: object + + output_schema: object diff --git a/src/gradientai/types/agents/function_update_response.py b/src/gradientai/types/agents/function_update_response.py new file mode 100644 index 00000000..82fc63be --- /dev/null +++ b/src/gradientai/types/agents/function_update_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["FunctionUpdateResponse"] + + +class FunctionUpdateResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from ..api_agent import APIAgent diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py index cb98e0bd..bfb05fa6 100644 --- a/tests/api_resources/agents/test_functions.py +++ b/tests/api_resources/agents/test_functions.py @@ -9,7 +9,11 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import FunctionCreateResponse +from gradientai.types.agents import ( + FunctionCreateResponse, + FunctionDeleteResponse, + FunctionUpdateResponse, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -74,6 +78,127 @@ def test_path_params_create(self, client: GradientAI) -> None: path_agent_uuid="", ) + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: 
GradientAI) -> None: + function = client.agents.functions.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + function = client.agents.functions.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + description="description", + faas_name="faas_name", + faas_namespace="faas_namespace", + function_name="function_name", + body_function_uuid="function_uuid", + input_schema={}, + output_schema={}, + ) + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.agents.functions.with_raw_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = response.parse() + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.agents.functions.with_streaming_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = response.parse() + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + 
client.agents.functions.with_raw_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): + client.agents.functions.with_raw_response.update( + path_function_uuid="", + path_agent_uuid="agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + function = client.agents.functions.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.agents.functions.with_raw_response.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = response.parse() + assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.agents.functions.with_streaming_response.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = response.parse() + assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.agents.functions.with_raw_response.delete( + function_uuid="function_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but 
received ''"): + client.agents.functions.with_raw_response.delete( + function_uuid="", + agent_uuid="agent_uuid", + ) + class TestAsyncFunctions: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -134,3 +259,124 @@ async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: await async_client.agents.functions.with_raw_response.create( path_agent_uuid="", ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + function = await async_client.agents.functions.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + function = await async_client.agents.functions.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + description="description", + faas_name="faas_name", + faas_namespace="faas_namespace", + function_name="function_name", + body_function_uuid="function_uuid", + input_schema={}, + output_schema={}, + ) + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.functions.with_raw_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = await response.parse() + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with 
async_client.agents.functions.with_streaming_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = await response.parse() + assert_matches_type(FunctionUpdateResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + await async_client.agents.functions.with_raw_response.update( + path_function_uuid="function_uuid", + path_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): + await async_client.agents.functions.with_raw_response.update( + path_function_uuid="", + path_agent_uuid="agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + function = await async_client.agents.functions.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.functions.with_raw_response.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + function = await response.parse() + assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with 
async_client.agents.functions.with_streaming_response.delete( + function_uuid="function_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + function = await response.parse() + assert_matches_type(FunctionDeleteResponse, function, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.agents.functions.with_raw_response.delete( + function_uuid="function_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): + await async_client.agents.functions.with_raw_response.delete( + function_uuid="", + agent_uuid="agent_uuid", + ) diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py new file mode 100644 index 00000000..c8b5541d --- /dev/null +++ b/tests/api_resources/agents/test_knowledge_bases.py @@ -0,0 +1,106 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.agents import APILinkKnowledgeBaseOutput + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestKnowledgeBases: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_attach(self, client: GradientAI) -> None: + knowledge_base = client.agents.knowledge_bases.attach( + "agent_uuid", + ) + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_attach(self, client: GradientAI) -> None: + response = client.agents.knowledge_bases.with_raw_response.attach( + "agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_attach(self, client: GradientAI) -> None: + with client.agents.knowledge_bases.with_streaming_response.attach( + "agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_attach(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.agents.knowledge_bases.with_raw_response.attach( + "", + ) + + +class TestAsyncKnowledgeBases: + parametrize = 
pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_attach(self, async_client: AsyncGradientAI) -> None: + knowledge_base = await async_client.agents.knowledge_bases.attach( + "agent_uuid", + ) + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_attach(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.knowledge_bases.with_raw_response.attach( + "agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_attach(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.knowledge_bases.with_streaming_response.attach( + "agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_attach(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.agents.knowledge_bases.with_raw_response.attach( + "", + ) From 6d62ab00594d70df0458a0a401f866af15a9298e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 14 Jun 2025 05:42:56 +0000 Subject: [PATCH 09/41] feat(api): update via SDK Studio --- .stats.yml | 6 +- README.md | 14 +- api.md | 35 +- 
src/gradientai/_client.py | 76 ++- src/gradientai/resources/__init__.py | 28 + src/gradientai/resources/indexing_jobs.py | 543 ++++++++++++++++++ .../resources/knowledge_bases/__init__.py | 33 ++ .../resources/knowledge_bases/data_sources.py | 319 ++++++++++ .../knowledge_bases/knowledge_bases.py | 378 ++++++++++++ src/gradientai/types/__init__.py | 14 + .../types/indexing_job_create_params.py | 14 + .../types/indexing_job_create_response.py | 12 + .../types/indexing_job_list_params.py | 15 + .../types/indexing_job_list_response.py | 18 + ...xing_job_retrieve_data_sources_response.py | 52 ++ .../types/indexing_job_retrieve_response.py | 12 + .../indexing_job_update_cancel_params.py | 14 + .../indexing_job_update_cancel_response.py | 12 + .../types/knowledge_base_create_params.py | 64 +++ .../types/knowledge_base_create_response.py | 12 + .../types/knowledge_base_list_params.py | 15 + .../types/knowledge_base_list_response.py | 18 + .../types/knowledge_bases/__init__.py | 12 + .../api_file_upload_data_source.py | 15 + .../api_file_upload_data_source_param.py | 15 + .../api_knowledge_base_data_source.py | 35 ++ .../knowledge_bases/api_spaces_data_source.py | 15 + .../api_spaces_data_source_param.py | 15 + .../api_web_crawler_data_source.py | 26 + .../api_web_crawler_data_source_param.py | 25 + .../data_source_create_params.py | 33 ++ .../data_source_create_response.py | 12 + .../data_source_list_params.py | 15 + .../data_source_list_response.py | 18 + .../api_resources/knowledge_bases/__init__.py | 1 + .../knowledge_bases/test_data_sources.py | 269 +++++++++ tests/api_resources/test_indexing_jobs.py | 446 ++++++++++++++ tests/api_resources/test_knowledge_bases.py | 227 ++++++++ 38 files changed, 2867 insertions(+), 16 deletions(-) create mode 100644 src/gradientai/resources/indexing_jobs.py create mode 100644 src/gradientai/resources/knowledge_bases/__init__.py create mode 100644 src/gradientai/resources/knowledge_bases/data_sources.py create mode 100644 
src/gradientai/resources/knowledge_bases/knowledge_bases.py create mode 100644 src/gradientai/types/indexing_job_create_params.py create mode 100644 src/gradientai/types/indexing_job_create_response.py create mode 100644 src/gradientai/types/indexing_job_list_params.py create mode 100644 src/gradientai/types/indexing_job_list_response.py create mode 100644 src/gradientai/types/indexing_job_retrieve_data_sources_response.py create mode 100644 src/gradientai/types/indexing_job_retrieve_response.py create mode 100644 src/gradientai/types/indexing_job_update_cancel_params.py create mode 100644 src/gradientai/types/indexing_job_update_cancel_response.py create mode 100644 src/gradientai/types/knowledge_base_create_params.py create mode 100644 src/gradientai/types/knowledge_base_create_response.py create mode 100644 src/gradientai/types/knowledge_base_list_params.py create mode 100644 src/gradientai/types/knowledge_base_list_response.py create mode 100644 src/gradientai/types/knowledge_bases/api_file_upload_data_source.py create mode 100644 src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py create mode 100644 src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py create mode 100644 src/gradientai/types/knowledge_bases/api_spaces_data_source.py create mode 100644 src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py create mode 100644 src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py create mode 100644 src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py create mode 100644 src/gradientai/types/knowledge_bases/data_source_create_params.py create mode 100644 src/gradientai/types/knowledge_bases/data_source_create_response.py create mode 100644 src/gradientai/types/knowledge_bases/data_source_list_params.py create mode 100644 src/gradientai/types/knowledge_bases/data_source_list_response.py create mode 100644 tests/api_resources/knowledge_bases/__init__.py create mode 100644 
tests/api_resources/knowledge_bases/test_data_sources.py create mode 100644 tests/api_resources/test_indexing_jobs.py create mode 100644 tests/api_resources/test_knowledge_bases.py diff --git a/.stats.yml b/.stats.yml index 9743a688..8135f5de 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 17 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-4fb25ab4cb2a89b06ad5e071dba45405224808d3208aed937c231003ab6fc5f6.yml -openapi_spec_hash: c41014abe91e4f7205d503900cd31568 +configured_endpoints: 26 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-d8b53b5432334e3c25a01f8afa9cc6bb9213c8deb83721113ac48e0544a45c6a.yml +openapi_spec_hash: f6129f6ab890acc4ce6da26611b8fe67 config_hash: 69dc66269416b2e01e8852b5a6788b97 diff --git a/README.md b/README.md index a6757d3a..10236f18 100644 --- a/README.md +++ b/README.md @@ -90,17 +90,11 @@ from gradientai import GradientAI client = GradientAI() -response = client.chat.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - stream_options={}, +data_source = client.knowledge_bases.data_sources.create( + path_knowledge_base_uuid="knowledge_base_uuid", + aws_data_source={}, ) -print(response.stream_options) +print(data_source.aws_data_source) ``` ## Handling errors diff --git a/api.md b/api.md index 6a2b6456..e8cac919 100644 --- a/api.md +++ b/api.md @@ -91,17 +91,41 @@ Methods: Types: ```python -from gradientai.types import APIIndexingJob +from gradientai.types import ( + APIIndexingJob, + IndexingJobCreateResponse, + IndexingJobRetrieveResponse, + IndexingJobListResponse, + IndexingJobRetrieveDataSourcesResponse, + IndexingJobUpdateCancelResponse, +) ``` +Methods: + +- client.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse +- client.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse +- 
client.indexing_jobs.list(\*\*params) -> IndexingJobListResponse +- client.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse +- client.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse + # KnowledgeBases Types: ```python -from gradientai.types import APIKnowledgeBase +from gradientai.types import ( + APIKnowledgeBase, + KnowledgeBaseCreateResponse, + KnowledgeBaseListResponse, +) ``` +Methods: + +- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse +- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse + ## DataSources Types: @@ -112,9 +136,16 @@ from gradientai.types.knowledge_bases import ( APIKnowledgeBaseDataSource, APISpacesDataSource, APIWebCrawlerDataSource, + DataSourceCreateResponse, + DataSourceListResponse, ) ``` +Methods: + +- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse +- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse + # APIKeys Types: diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index b22056ad..ddf7beae 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -31,11 +31,13 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, embeddings + from .resources import chat, agents, models, embeddings, indexing_jobs, knowledge_bases from .resources.chat import ChatResource, AsyncChatResource from .resources.models import ModelsResource, AsyncModelsResource from .resources.embeddings import EmbeddingsResource, AsyncEmbeddingsResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource + from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource + from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource __all__ = [ "Timeout", @@ -110,6 +112,18 @@ def agents(self) -> 
AgentsResource: return AgentsResource(self) + @cached_property + def indexing_jobs(self) -> IndexingJobsResource: + from .resources.indexing_jobs import IndexingJobsResource + + return IndexingJobsResource(self) + + @cached_property + def knowledge_bases(self) -> KnowledgeBasesResource: + from .resources.knowledge_bases import KnowledgeBasesResource + + return KnowledgeBasesResource(self) + @cached_property def chat(self) -> ChatResource: from .resources.chat import ChatResource @@ -302,6 +316,18 @@ def agents(self) -> AsyncAgentsResource: return AsyncAgentsResource(self) + @cached_property + def indexing_jobs(self) -> AsyncIndexingJobsResource: + from .resources.indexing_jobs import AsyncIndexingJobsResource + + return AsyncIndexingJobsResource(self) + + @cached_property + def knowledge_bases(self) -> AsyncKnowledgeBasesResource: + from .resources.knowledge_bases import AsyncKnowledgeBasesResource + + return AsyncKnowledgeBasesResource(self) + @cached_property def chat(self) -> AsyncChatResource: from .resources.chat import AsyncChatResource @@ -445,6 +471,18 @@ def agents(self) -> agents.AgentsResourceWithRawResponse: return AgentsResourceWithRawResponse(self._client.agents) + @cached_property + def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithRawResponse: + from .resources.indexing_jobs import IndexingJobsResourceWithRawResponse + + return IndexingJobsResourceWithRawResponse(self._client.indexing_jobs) + + @cached_property + def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawResponse: + from .resources.knowledge_bases import KnowledgeBasesResourceWithRawResponse + + return KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) + @cached_property def chat(self) -> chat.ChatResourceWithRawResponse: from .resources.chat import ChatResourceWithRawResponse @@ -476,6 +514,18 @@ def agents(self) -> agents.AsyncAgentsResourceWithRawResponse: return AsyncAgentsResourceWithRawResponse(self._client.agents) + 
@cached_property + def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithRawResponse: + from .resources.indexing_jobs import AsyncIndexingJobsResourceWithRawResponse + + return AsyncIndexingJobsResourceWithRawResponse(self._client.indexing_jobs) + + @cached_property + def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawResponse: + from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithRawResponse + + return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) + @cached_property def chat(self) -> chat.AsyncChatResourceWithRawResponse: from .resources.chat import AsyncChatResourceWithRawResponse @@ -507,6 +557,18 @@ def agents(self) -> agents.AgentsResourceWithStreamingResponse: return AgentsResourceWithStreamingResponse(self._client.agents) + @cached_property + def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithStreamingResponse: + from .resources.indexing_jobs import IndexingJobsResourceWithStreamingResponse + + return IndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) + + @cached_property + def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreamingResponse: + from .resources.knowledge_bases import KnowledgeBasesResourceWithStreamingResponse + + return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) + @cached_property def chat(self) -> chat.ChatResourceWithStreamingResponse: from .resources.chat import ChatResourceWithStreamingResponse @@ -538,6 +600,18 @@ def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse: return AsyncAgentsResourceWithStreamingResponse(self._client.agents) + @cached_property + def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithStreamingResponse: + from .resources.indexing_jobs import AsyncIndexingJobsResourceWithStreamingResponse + + return AsyncIndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) + + @cached_property + def 
knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStreamingResponse: + from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithStreamingResponse + + return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) + @cached_property def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: from .resources.chat import AsyncChatResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 386e2ed6..15e90bdb 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -32,6 +32,22 @@ EmbeddingsResourceWithStreamingResponse, AsyncEmbeddingsResourceWithStreamingResponse, ) +from .indexing_jobs import ( + IndexingJobsResource, + AsyncIndexingJobsResource, + IndexingJobsResourceWithRawResponse, + AsyncIndexingJobsResourceWithRawResponse, + IndexingJobsResourceWithStreamingResponse, + AsyncIndexingJobsResourceWithStreamingResponse, +) +from .knowledge_bases import ( + KnowledgeBasesResource, + AsyncKnowledgeBasesResource, + KnowledgeBasesResourceWithRawResponse, + AsyncKnowledgeBasesResourceWithRawResponse, + KnowledgeBasesResourceWithStreamingResponse, + AsyncKnowledgeBasesResourceWithStreamingResponse, +) __all__ = [ "AgentsResource", @@ -40,6 +56,18 @@ "AsyncAgentsResourceWithRawResponse", "AgentsResourceWithStreamingResponse", "AsyncAgentsResourceWithStreamingResponse", + "IndexingJobsResource", + "AsyncIndexingJobsResource", + "IndexingJobsResourceWithRawResponse", + "AsyncIndexingJobsResourceWithRawResponse", + "IndexingJobsResourceWithStreamingResponse", + "AsyncIndexingJobsResourceWithStreamingResponse", + "KnowledgeBasesResource", + "AsyncKnowledgeBasesResource", + "KnowledgeBasesResourceWithRawResponse", + "AsyncKnowledgeBasesResourceWithRawResponse", + "KnowledgeBasesResourceWithStreamingResponse", + "AsyncKnowledgeBasesResourceWithStreamingResponse", "ChatResource", "AsyncChatResource", 
"ChatResourceWithRawResponse", diff --git a/src/gradientai/resources/indexing_jobs.py b/src/gradientai/resources/indexing_jobs.py new file mode 100644 index 00000000..6647d36c --- /dev/null +++ b/src/gradientai/resources/indexing_jobs.py @@ -0,0 +1,543 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List + +import httpx + +from ..types import indexing_job_list_params, indexing_job_create_params, indexing_job_update_cancel_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.indexing_job_list_response import IndexingJobListResponse +from ..types.indexing_job_create_response import IndexingJobCreateResponse +from ..types.indexing_job_retrieve_response import IndexingJobRetrieveResponse +from ..types.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse +from ..types.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse + +__all__ = ["IndexingJobsResource", "AsyncIndexingJobsResource"] + + +class IndexingJobsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> IndexingJobsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return IndexingJobsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> IndexingJobsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return IndexingJobsResourceWithStreamingResponse(self) + + def create( + self, + *, + data_source_uuids: List[str] | NotGiven = NOT_GIVEN, + knowledge_base_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobCreateResponse: + """ + To start an indexing job for a knowledge base, send a POST request to + `/v2/gen-ai/indexing_jobs`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/indexing_jobs", + body=maybe_transform( + { + "data_source_uuids": data_source_uuids, + "knowledge_base_uuid": knowledge_base_uuid, + }, + indexing_job_create_params.IndexingJobCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobCreateResponse, + ) + + def retrieve( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobRetrieveResponse: + """ + To get status of an indexing Job for a knowledge base, send a GET request to + `/v2/gen-ai/indexing_jobs/{uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/gen-ai/indexing_jobs/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobRetrieveResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobListResponse: + """ + To list all indexing jobs for a knowledge base, send a GET request to + `/v2/gen-ai/indexing_jobs`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/indexing_jobs", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + indexing_job_list_params.IndexingJobListParams, + ), + ), + cast_to=IndexingJobListResponse, + ) + + def retrieve_data_sources( + self, + indexing_job_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobRetrieveDataSourcesResponse: + """ + To list all datasources for an indexing job, send a GET request to + `/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not indexing_job_uuid: + raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}") + return self._get( + f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobRetrieveDataSourcesResponse, + ) + + def update_cancel( + self, + path_uuid: str, + *, + body_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobUpdateCancelResponse: + """ + To cancel an indexing job for a knowledge base, send a PUT request to + `/v2/gen-ai/indexing_jobs/{uuid}/cancel`. + + Args: + body_uuid: A unique identifier for an indexing job. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return self._put( + f"/v2/gen-ai/indexing_jobs/{path_uuid}/cancel", + body=maybe_transform( + {"body_uuid": body_uuid}, indexing_job_update_cancel_params.IndexingJobUpdateCancelParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobUpdateCancelResponse, + ) + + +class AsyncIndexingJobsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncIndexingJobsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncIndexingJobsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncIndexingJobsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncIndexingJobsResourceWithStreamingResponse(self) + + async def create( + self, + *, + data_source_uuids: List[str] | NotGiven = NOT_GIVEN, + knowledge_base_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobCreateResponse: + """ + To start an indexing job for a knowledge base, send a POST request to + `/v2/gen-ai/indexing_jobs`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/indexing_jobs", + body=await async_maybe_transform( + { + "data_source_uuids": data_source_uuids, + "knowledge_base_uuid": knowledge_base_uuid, + }, + indexing_job_create_params.IndexingJobCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobCreateResponse, + ) + + async def retrieve( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobRetrieveResponse: + """ + To get status of an indexing Job for a knowledge base, send a GET request to + `/v2/gen-ai/indexing_jobs/{uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/gen-ai/indexing_jobs/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobRetrieveResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobListResponse: + """ + To list all indexing jobs for a knowledge base, send a GET request to + `/v2/gen-ai/indexing_jobs`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/indexing_jobs", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + indexing_job_list_params.IndexingJobListParams, + ), + ), + cast_to=IndexingJobListResponse, + ) + + async def retrieve_data_sources( + self, + indexing_job_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobRetrieveDataSourcesResponse: + """ + To list all datasources for an indexing job, send a GET request to + `/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not indexing_job_uuid: + raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}") + return await self._get( + f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobRetrieveDataSourcesResponse, + ) + + async def update_cancel( + self, + path_uuid: str, + *, + body_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> IndexingJobUpdateCancelResponse: + """ + To cancel an indexing job for a knowledge base, send a PUT request to + `/v2/gen-ai/indexing_jobs/{uuid}/cancel`. + + Args: + body_uuid: A unique identifier for an indexing job. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return await self._put( + f"/v2/gen-ai/indexing_jobs/{path_uuid}/cancel", + body=await async_maybe_transform( + {"body_uuid": body_uuid}, indexing_job_update_cancel_params.IndexingJobUpdateCancelParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=IndexingJobUpdateCancelResponse, + ) + + +class IndexingJobsResourceWithRawResponse: + def __init__(self, indexing_jobs: IndexingJobsResource) -> None: + self._indexing_jobs = indexing_jobs + + self.create = to_raw_response_wrapper( + indexing_jobs.create, + ) + self.retrieve = to_raw_response_wrapper( + indexing_jobs.retrieve, + ) + self.list = to_raw_response_wrapper( + indexing_jobs.list, + ) + self.retrieve_data_sources = to_raw_response_wrapper( + indexing_jobs.retrieve_data_sources, + ) + self.update_cancel = to_raw_response_wrapper( + indexing_jobs.update_cancel, + ) + + +class AsyncIndexingJobsResourceWithRawResponse: + def __init__(self, indexing_jobs: AsyncIndexingJobsResource) -> None: + self._indexing_jobs = indexing_jobs + + self.create = async_to_raw_response_wrapper( + indexing_jobs.create, + ) + self.retrieve = async_to_raw_response_wrapper( + indexing_jobs.retrieve, + ) + self.list = async_to_raw_response_wrapper( + indexing_jobs.list, + ) + self.retrieve_data_sources = async_to_raw_response_wrapper( + indexing_jobs.retrieve_data_sources, + ) + self.update_cancel = async_to_raw_response_wrapper( + indexing_jobs.update_cancel, + ) + + +class IndexingJobsResourceWithStreamingResponse: + def __init__(self, indexing_jobs: IndexingJobsResource) -> 
None: + self._indexing_jobs = indexing_jobs + + self.create = to_streamed_response_wrapper( + indexing_jobs.create, + ) + self.retrieve = to_streamed_response_wrapper( + indexing_jobs.retrieve, + ) + self.list = to_streamed_response_wrapper( + indexing_jobs.list, + ) + self.retrieve_data_sources = to_streamed_response_wrapper( + indexing_jobs.retrieve_data_sources, + ) + self.update_cancel = to_streamed_response_wrapper( + indexing_jobs.update_cancel, + ) + + +class AsyncIndexingJobsResourceWithStreamingResponse: + def __init__(self, indexing_jobs: AsyncIndexingJobsResource) -> None: + self._indexing_jobs = indexing_jobs + + self.create = async_to_streamed_response_wrapper( + indexing_jobs.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + indexing_jobs.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + indexing_jobs.list, + ) + self.retrieve_data_sources = async_to_streamed_response_wrapper( + indexing_jobs.retrieve_data_sources, + ) + self.update_cancel = async_to_streamed_response_wrapper( + indexing_jobs.update_cancel, + ) diff --git a/src/gradientai/resources/knowledge_bases/__init__.py b/src/gradientai/resources/knowledge_bases/__init__.py new file mode 100644 index 00000000..03d143e2 --- /dev/null +++ b/src/gradientai/resources/knowledge_bases/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .data_sources import ( + DataSourcesResource, + AsyncDataSourcesResource, + DataSourcesResourceWithRawResponse, + AsyncDataSourcesResourceWithRawResponse, + DataSourcesResourceWithStreamingResponse, + AsyncDataSourcesResourceWithStreamingResponse, +) +from .knowledge_bases import ( + KnowledgeBasesResource, + AsyncKnowledgeBasesResource, + KnowledgeBasesResourceWithRawResponse, + AsyncKnowledgeBasesResourceWithRawResponse, + KnowledgeBasesResourceWithStreamingResponse, + AsyncKnowledgeBasesResourceWithStreamingResponse, +) + +__all__ = [ + "DataSourcesResource", + "AsyncDataSourcesResource", + "DataSourcesResourceWithRawResponse", + "AsyncDataSourcesResourceWithRawResponse", + "DataSourcesResourceWithStreamingResponse", + "AsyncDataSourcesResourceWithStreamingResponse", + "KnowledgeBasesResource", + "AsyncKnowledgeBasesResource", + "KnowledgeBasesResourceWithRawResponse", + "AsyncKnowledgeBasesResourceWithRawResponse", + "KnowledgeBasesResourceWithStreamingResponse", + "AsyncKnowledgeBasesResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/gradientai/resources/knowledge_bases/data_sources.py new file mode 100644 index 00000000..21bde932 --- /dev/null +++ b/src/gradientai/resources/knowledge_bases/data_sources.py @@ -0,0 +1,319 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.knowledge_bases import ( + data_source_list_params, + data_source_create_params, +) +from ...types.knowledge_bases.data_source_list_response import DataSourceListResponse +from ...types.knowledge_bases.data_source_create_response import DataSourceCreateResponse +from ...types.knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam +from ...types.knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam + +__all__ = ["DataSourcesResource", "AsyncDataSourcesResource"] + + +class DataSourcesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> DataSourcesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return DataSourcesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> DataSourcesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return DataSourcesResourceWithStreamingResponse(self) + + def create( + self, + path_knowledge_base_uuid: str, + *, + aws_data_source: data_source_create_params.AwsDataSource | NotGiven = NOT_GIVEN, + body_knowledge_base_uuid: str | NotGiven = NOT_GIVEN, + spaces_data_source: APISpacesDataSourceParam | NotGiven = NOT_GIVEN, + web_crawler_data_source: APIWebCrawlerDataSourceParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DataSourceCreateResponse: + """ + To add a data source to a knowledge base, send a POST request to + `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}" + ) + return self._post( + f"/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources", + body=maybe_transform( + { + "aws_data_source": aws_data_source, + "body_knowledge_base_uuid": body_knowledge_base_uuid, + "spaces_data_source": spaces_data_source, + "web_crawler_data_source": web_crawler_data_source, + }, + data_source_create_params.DataSourceCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DataSourceCreateResponse, + ) + + def list( + self, + knowledge_base_uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DataSourceListResponse: + """ + To list all data sources for a knowledge base, send a GET request to + `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return self._get( + f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + data_source_list_params.DataSourceListParams, + ), + ), + cast_to=DataSourceListResponse, + ) + + +class AsyncDataSourcesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncDataSourcesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncDataSourcesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncDataSourcesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncDataSourcesResourceWithStreamingResponse(self) + + async def create( + self, + path_knowledge_base_uuid: str, + *, + aws_data_source: data_source_create_params.AwsDataSource | NotGiven = NOT_GIVEN, + body_knowledge_base_uuid: str | NotGiven = NOT_GIVEN, + spaces_data_source: APISpacesDataSourceParam | NotGiven = NOT_GIVEN, + web_crawler_data_source: APIWebCrawlerDataSourceParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DataSourceCreateResponse: + """ + To add a data source to a knowledge base, send a POST request to + `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}" + ) + return await self._post( + f"/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources", + body=await async_maybe_transform( + { + "aws_data_source": aws_data_source, + "body_knowledge_base_uuid": body_knowledge_base_uuid, + "spaces_data_source": spaces_data_source, + "web_crawler_data_source": web_crawler_data_source, + }, + data_source_create_params.DataSourceCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DataSourceCreateResponse, + ) + + async def list( + self, + knowledge_base_uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DataSourceListResponse: + """ + To list all data sources for a knowledge base, send a GET request to + `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return await self._get( + f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + data_source_list_params.DataSourceListParams, + ), + ), + cast_to=DataSourceListResponse, + ) + + +class DataSourcesResourceWithRawResponse: + def __init__(self, data_sources: DataSourcesResource) -> None: + self._data_sources = data_sources + + self.create = to_raw_response_wrapper( + data_sources.create, + ) + self.list = to_raw_response_wrapper( + data_sources.list, + ) + + +class AsyncDataSourcesResourceWithRawResponse: + def __init__(self, data_sources: AsyncDataSourcesResource) -> None: + self._data_sources = data_sources + + self.create = async_to_raw_response_wrapper( + data_sources.create, + ) + self.list = async_to_raw_response_wrapper( + data_sources.list, + ) + + +class DataSourcesResourceWithStreamingResponse: + def __init__(self, data_sources: DataSourcesResource) -> None: + self._data_sources = data_sources + + self.create = to_streamed_response_wrapper( + data_sources.create, + ) + self.list = to_streamed_response_wrapper( + data_sources.list, + ) + + +class AsyncDataSourcesResourceWithStreamingResponse: + def __init__(self, data_sources: AsyncDataSourcesResource) -> None: + self._data_sources = data_sources + + self.create = async_to_streamed_response_wrapper( + data_sources.create, + ) + self.list = async_to_streamed_response_wrapper( + 
data_sources.list, + ) diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py new file mode 100644 index 00000000..c49e23c4 --- /dev/null +++ b/src/gradientai/resources/knowledge_bases/knowledge_bases.py @@ -0,0 +1,378 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Iterable + +import httpx + +from ...types import knowledge_base_list_params, knowledge_base_create_params +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .data_sources import ( + DataSourcesResource, + AsyncDataSourcesResource, + DataSourcesResourceWithRawResponse, + AsyncDataSourcesResourceWithRawResponse, + DataSourcesResourceWithStreamingResponse, + AsyncDataSourcesResourceWithStreamingResponse, +) +from ..._base_client import make_request_options +from ...types.knowledge_base_list_response import KnowledgeBaseListResponse +from ...types.knowledge_base_create_response import KnowledgeBaseCreateResponse + +__all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"] + + +class KnowledgeBasesResource(SyncAPIResource): + @cached_property + def data_sources(self) -> DataSourcesResource: + return DataSourcesResource(self._client) + + @cached_property + def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return KnowledgeBasesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return KnowledgeBasesResourceWithStreamingResponse(self) + + def create( + self, + *, + database_id: str | NotGiven = NOT_GIVEN, + datasources: Iterable[knowledge_base_create_params.Datasource] | NotGiven = NOT_GIVEN, + embedding_model_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + tags: List[str] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KnowledgeBaseCreateResponse: + """ + To create a knowledge base, send a POST request to `/v2/gen-ai/knowledge_bases`. + + Args: + database_id: Identifier of the DigitalOcean OpenSearch database this knowledge base will use, + optional. If not provided, we create a new database for the knowledge base in + the same region as the knowledge base. + + datasources: The data sources to use for this knowledge base. See + [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets) + for more information on data sources best practices. 
+ + embedding_model_uuid: Identifier for the + [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models). + + name: Name of the knowledge base. + + project_id: Identifier of the DigitalOcean project this knowledge base will belong to. + + region: The datacenter region to deploy the knowledge base in. + + tags: Tags to organize your knowledge base. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/knowledge_bases", + body=maybe_transform( + { + "database_id": database_id, + "datasources": datasources, + "embedding_model_uuid": embedding_model_uuid, + "name": name, + "project_id": project_id, + "region": region, + "tags": tags, + "vpc_uuid": vpc_uuid, + }, + knowledge_base_create_params.KnowledgeBaseCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KnowledgeBaseCreateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KnowledgeBaseListResponse: + """ + To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/knowledge_bases", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + knowledge_base_list_params.KnowledgeBaseListParams, + ), + ), + cast_to=KnowledgeBaseListResponse, + ) + + +class AsyncKnowledgeBasesResource(AsyncAPIResource): + @cached_property + def data_sources(self) -> AsyncDataSourcesResource: + return AsyncDataSourcesResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncKnowledgeBasesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncKnowledgeBasesResourceWithStreamingResponse(self) + + async def create( + self, + *, + database_id: str | NotGiven = NOT_GIVEN, + datasources: Iterable[knowledge_base_create_params.Datasource] | NotGiven = NOT_GIVEN, + embedding_model_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + tags: List[str] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KnowledgeBaseCreateResponse: + """ + To create a knowledge base, send a POST request to `/v2/gen-ai/knowledge_bases`. + + Args: + database_id: Identifier of the DigitalOcean OpenSearch database this knowledge base will use, + optional. If not provided, we create a new database for the knowledge base in + the same region as the knowledge base. + + datasources: The data sources to use for this knowledge base. See + [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets) + for more information on data sources best practices. + + embedding_model_uuid: Identifier for the + [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models). + + name: Name of the knowledge base. + + project_id: Identifier of the DigitalOcean project this knowledge base will belong to. + + region: The datacenter region to deploy the knowledge base in. + + tags: Tags to organize your knowledge base. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/knowledge_bases", + body=await async_maybe_transform( + { + "database_id": database_id, + "datasources": datasources, + "embedding_model_uuid": embedding_model_uuid, + "name": name, + "project_id": project_id, + "region": region, + "tags": tags, + "vpc_uuid": vpc_uuid, + }, + knowledge_base_create_params.KnowledgeBaseCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KnowledgeBaseCreateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KnowledgeBaseListResponse: + """ + To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/knowledge_bases", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + knowledge_base_list_params.KnowledgeBaseListParams, + ), + ), + cast_to=KnowledgeBaseListResponse, + ) + + +class KnowledgeBasesResourceWithRawResponse: + def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.create = to_raw_response_wrapper( + knowledge_bases.create, + ) + self.list = to_raw_response_wrapper( + knowledge_bases.list, + ) + + @cached_property + def data_sources(self) -> DataSourcesResourceWithRawResponse: + return DataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources) + + +class AsyncKnowledgeBasesResourceWithRawResponse: + def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.create = async_to_raw_response_wrapper( + knowledge_bases.create, + ) + self.list = async_to_raw_response_wrapper( + knowledge_bases.list, + ) + + @cached_property + def data_sources(self) -> AsyncDataSourcesResourceWithRawResponse: + return AsyncDataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources) + + +class KnowledgeBasesResourceWithStreamingResponse: + def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.create = to_streamed_response_wrapper( + knowledge_bases.create, + ) + self.list = to_streamed_response_wrapper( + knowledge_bases.list, + ) + + @cached_property + def data_sources(self) -> 
DataSourcesResourceWithStreamingResponse: + return DataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources) + + +class AsyncKnowledgeBasesResourceWithStreamingResponse: + def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: + self._knowledge_bases = knowledge_bases + + self.create = async_to_streamed_response_wrapper( + knowledge_bases.create, + ) + self.list = async_to_streamed_response_wrapper( + knowledge_bases.list, + ) + + @cached_property + def data_sources(self) -> AsyncDataSourcesResourceWithStreamingResponse: + return AsyncDataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index 25d7b58d..cb52748c 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -18,12 +18,26 @@ from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams +from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo +from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams +from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse +from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams +from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse +from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams +from .knowledge_base_list_response import 
KnowledgeBaseListResponse as KnowledgeBaseListResponse from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob from .chat_create_completion_params import ChatCreateCompletionParams as ChatCreateCompletionParams +from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse +from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse from .chat_create_completion_response import ChatCreateCompletionResponse as ChatCreateCompletionResponse +from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams +from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse +from .indexing_job_retrieve_data_sources_response import ( + IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, +) from .chat_completion_request_message_content_part_text_param import ( ChatCompletionRequestMessageContentPartTextParam as ChatCompletionRequestMessageContentPartTextParam, ) diff --git a/src/gradientai/types/indexing_job_create_params.py b/src/gradientai/types/indexing_job_create_params.py new file mode 100644 index 00000000..04838472 --- /dev/null +++ b/src/gradientai/types/indexing_job_create_params.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List +from typing_extensions import TypedDict + +__all__ = ["IndexingJobCreateParams"] + + +class IndexingJobCreateParams(TypedDict, total=False): + data_source_uuids: List[str] + + knowledge_base_uuid: str diff --git a/src/gradientai/types/indexing_job_create_response.py b/src/gradientai/types/indexing_job_create_response.py new file mode 100644 index 00000000..839bc83b --- /dev/null +++ b/src/gradientai/types/indexing_job_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from .._models import BaseModel +from .api_indexing_job import APIIndexingJob + +__all__ = ["IndexingJobCreateResponse"] + + +class IndexingJobCreateResponse(BaseModel): + job: Optional[APIIndexingJob] = None diff --git a/src/gradientai/types/indexing_job_list_params.py b/src/gradientai/types/indexing_job_list_params.py new file mode 100644 index 00000000..90206aba --- /dev/null +++ b/src/gradientai/types/indexing_job_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["IndexingJobListParams"] + + +class IndexingJobListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/gradientai/types/indexing_job_list_response.py b/src/gradientai/types/indexing_job_list_response.py new file mode 100644 index 00000000..1379cc55 --- /dev/null +++ b/src/gradientai/types/indexing_job_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from .._models import BaseModel +from .agents.api_meta import APIMeta +from .agents.api_links import APILinks +from .api_indexing_job import APIIndexingJob + +__all__ = ["IndexingJobListResponse"] + + +class IndexingJobListResponse(BaseModel): + jobs: Optional[List[APIIndexingJob]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/gradientai/types/indexing_job_retrieve_data_sources_response.py b/src/gradientai/types/indexing_job_retrieve_data_sources_response.py new file mode 100644 index 00000000..b178b984 --- /dev/null +++ b/src/gradientai/types/indexing_job_retrieve_data_sources_response.py @@ -0,0 +1,52 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["IndexingJobRetrieveDataSourcesResponse", "IndexedDataSource"] + + +class IndexedDataSource(BaseModel): + completed_at: Optional[datetime] = None + + data_source_uuid: Optional[str] = None + + error_details: Optional[str] = None + + error_msg: Optional[str] = None + + failed_item_count: Optional[str] = None + + indexed_file_count: Optional[str] = None + + indexed_item_count: Optional[str] = None + + removed_item_count: Optional[str] = None + + skipped_item_count: Optional[str] = None + + started_at: Optional[datetime] = None + + status: Optional[ + Literal[ + "DATA_SOURCE_STATUS_UNKNOWN", + "DATA_SOURCE_STATUS_IN_PROGRESS", + "DATA_SOURCE_STATUS_UPDATED", + "DATA_SOURCE_STATUS_PARTIALLY_UPDATED", + "DATA_SOURCE_STATUS_NOT_UPDATED", + "DATA_SOURCE_STATUS_FAILED", + ] + ] = None + + total_bytes: Optional[str] = None + + total_bytes_indexed: Optional[str] = None + + total_file_count: Optional[str] = None + + +class IndexingJobRetrieveDataSourcesResponse(BaseModel): + indexed_data_sources: Optional[List[IndexedDataSource]] = None diff --git 
a/src/gradientai/types/indexing_job_retrieve_response.py b/src/gradientai/types/indexing_job_retrieve_response.py new file mode 100644 index 00000000..95f33d7a --- /dev/null +++ b/src/gradientai/types/indexing_job_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from .._models import BaseModel +from .api_indexing_job import APIIndexingJob + +__all__ = ["IndexingJobRetrieveResponse"] + + +class IndexingJobRetrieveResponse(BaseModel): + job: Optional[APIIndexingJob] = None diff --git a/src/gradientai/types/indexing_job_update_cancel_params.py b/src/gradientai/types/indexing_job_update_cancel_params.py new file mode 100644 index 00000000..4c2848b0 --- /dev/null +++ b/src/gradientai/types/indexing_job_update_cancel_params.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from .._utils import PropertyInfo + +__all__ = ["IndexingJobUpdateCancelParams"] + + +class IndexingJobUpdateCancelParams(TypedDict, total=False): + body_uuid: Annotated[str, PropertyInfo(alias="uuid")] + """A unique identifier for an indexing job.""" diff --git a/src/gradientai/types/indexing_job_update_cancel_response.py b/src/gradientai/types/indexing_job_update_cancel_response.py new file mode 100644 index 00000000..d50e1865 --- /dev/null +++ b/src/gradientai/types/indexing_job_update_cancel_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from .._models import BaseModel +from .api_indexing_job import APIIndexingJob + +__all__ = ["IndexingJobUpdateCancelResponse"] + + +class IndexingJobUpdateCancelResponse(BaseModel): + job: Optional[APIIndexingJob] = None diff --git a/src/gradientai/types/knowledge_base_create_params.py b/src/gradientai/types/knowledge_base_create_params.py new file mode 100644 index 00000000..3a58166b --- /dev/null +++ b/src/gradientai/types/knowledge_base_create_params.py @@ -0,0 +1,64 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Iterable +from typing_extensions import TypedDict + +from .knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam +from .knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam +from .knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam + +__all__ = ["KnowledgeBaseCreateParams", "Datasource"] + + +class KnowledgeBaseCreateParams(TypedDict, total=False): + database_id: str + """ + Identifier of the DigitalOcean OpenSearch database this knowledge base will use, + optional. If not provided, we create a new database for the knowledge base in + the same region as the knowledge base. + """ + + datasources: Iterable[Datasource] + """The data sources to use for this knowledge base. + + See + [Organize Data Sources](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#spaces-buckets) + for more information on data sources best practices. + """ + + embedding_model_uuid: str + """ + Identifier for the + [embedding model](https://docs.digitalocean.com/products/genai-platform/details/models/#embedding-models). 
+ """ + + name: str + """Name of the knowledge base.""" + + project_id: str + """Identifier of the DigitalOcean project this knowledge base will belong to.""" + + region: str + """The datacenter region to deploy the knowledge base in.""" + + tags: List[str] + """Tags to organize your knowledge base.""" + + vpc_uuid: str + + +class Datasource(TypedDict, total=False): + bucket_name: str + + bucket_region: str + + file_upload_data_source: APIFileUploadDataSourceParam + """File to upload as data source for knowledge base.""" + + item_path: str + + spaces_data_source: APISpacesDataSourceParam + + web_crawler_data_source: APIWebCrawlerDataSourceParam diff --git a/src/gradientai/types/knowledge_base_create_response.py b/src/gradientai/types/knowledge_base_create_response.py new file mode 100644 index 00000000..cc2d8b9f --- /dev/null +++ b/src/gradientai/types/knowledge_base_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from .._models import BaseModel +from .api_knowledge_base import APIKnowledgeBase + +__all__ = ["KnowledgeBaseCreateResponse"] + + +class KnowledgeBaseCreateResponse(BaseModel): + knowledge_base: Optional[APIKnowledgeBase] = None diff --git a/src/gradientai/types/knowledge_base_list_params.py b/src/gradientai/types/knowledge_base_list_params.py new file mode 100644 index 00000000..dcf9a0ec --- /dev/null +++ b/src/gradientai/types/knowledge_base_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KnowledgeBaseListParams"] + + +class KnowledgeBaseListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/gradientai/types/knowledge_base_list_response.py b/src/gradientai/types/knowledge_base_list_response.py new file mode 100644 index 00000000..09ca1ad3 --- /dev/null +++ b/src/gradientai/types/knowledge_base_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from .._models import BaseModel +from .agents.api_meta import APIMeta +from .agents.api_links import APILinks +from .api_knowledge_base import APIKnowledgeBase + +__all__ = ["KnowledgeBaseListResponse"] + + +class KnowledgeBaseListResponse(BaseModel): + knowledge_bases: Optional[List[APIKnowledgeBase]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/gradientai/types/knowledge_bases/__init__.py b/src/gradientai/types/knowledge_bases/__init__.py index f8ee8b14..e716e1f6 100644 --- a/src/gradientai/types/knowledge_bases/__init__.py +++ b/src/gradientai/types/knowledge_bases/__init__.py @@ -1,3 +1,15 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from __future__ import annotations + +from .api_spaces_data_source import APISpacesDataSource as APISpacesDataSource +from .data_source_list_params import DataSourceListParams as DataSourceListParams +from .data_source_create_params import DataSourceCreateParams as DataSourceCreateParams +from .data_source_list_response import DataSourceListResponse as DataSourceListResponse +from .api_file_upload_data_source import APIFileUploadDataSource as APIFileUploadDataSource +from .api_web_crawler_data_source import APIWebCrawlerDataSource as APIWebCrawlerDataSource +from .data_source_create_response import DataSourceCreateResponse as DataSourceCreateResponse +from .api_spaces_data_source_param import APISpacesDataSourceParam as APISpacesDataSourceParam +from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource as APIKnowledgeBaseDataSource +from .api_file_upload_data_source_param import APIFileUploadDataSourceParam as APIFileUploadDataSourceParam +from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam as APIWebCrawlerDataSourceParam diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py b/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py new file mode 100644 index 00000000..1dcc9639 --- /dev/null +++ b/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["APIFileUploadDataSource"] + + +class APIFileUploadDataSource(BaseModel): + original_file_name: Optional[str] = None + + size_in_bytes: Optional[str] = None + + stored_object_key: Optional[str] = None diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py b/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py new file mode 100644 index 00000000..37221059 --- /dev/null +++ b/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["APIFileUploadDataSourceParam"] + + +class APIFileUploadDataSourceParam(TypedDict, total=False): + original_file_name: str + + size_in_bytes: str + + stored_object_key: str diff --git a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py new file mode 100644 index 00000000..df1cd3bb --- /dev/null +++ b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py @@ -0,0 +1,35 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from datetime import datetime + +from ..._models import BaseModel +from ..api_indexing_job import APIIndexingJob +from .api_spaces_data_source import APISpacesDataSource +from .api_file_upload_data_source import APIFileUploadDataSource +from .api_web_crawler_data_source import APIWebCrawlerDataSource + +__all__ = ["APIKnowledgeBaseDataSource"] + + +class APIKnowledgeBaseDataSource(BaseModel): + bucket_name: Optional[str] = None + + created_at: Optional[datetime] = None + + file_upload_data_source: Optional[APIFileUploadDataSource] = None + """File to upload as data source for knowledge base.""" + + item_path: Optional[str] = None + + last_indexing_job: Optional[APIIndexingJob] = None + + region: Optional[str] = None + + spaces_data_source: Optional[APISpacesDataSource] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None + + web_crawler_data_source: Optional[APIWebCrawlerDataSource] = None diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source.py b/src/gradientai/types/knowledge_bases/api_spaces_data_source.py new file mode 100644 index 00000000..f3a0421a --- /dev/null +++ b/src/gradientai/types/knowledge_bases/api_spaces_data_source.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["APISpacesDataSource"] + + +class APISpacesDataSource(BaseModel): + bucket_name: Optional[str] = None + + item_path: Optional[str] = None + + region: Optional[str] = None diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py b/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py new file mode 100644 index 00000000..b7f2f657 --- /dev/null +++ b/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["APISpacesDataSourceParam"] + + +class APISpacesDataSourceParam(TypedDict, total=False): + bucket_name: str + + item_path: str + + region: str diff --git a/src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py b/src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py new file mode 100644 index 00000000..4690c607 --- /dev/null +++ b/src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["APIWebCrawlerDataSource"] + + +class APIWebCrawlerDataSource(BaseModel): + base_url: Optional[str] = None + """The base url to crawl.""" + + crawling_option: Optional[Literal["UNKNOWN", "SCOPED", "PATH", "DOMAIN", "SUBDOMAINS"]] = None + """Options for specifying how URLs found on pages should be handled. + + - UNKNOWN: Default unknown value + - SCOPED: Only include the base URL. + - PATH: Crawl the base URL and linked pages within the URL path. + - DOMAIN: Crawl the base URL and linked pages within the same domain. + - SUBDOMAINS: Crawl the base URL and linked pages for any subdomain. + """ + + embed_media: Optional[bool] = None + """Whether to ingest and index media (images, etc.) on web pages.""" diff --git a/src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py b/src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py new file mode 100644 index 00000000..2345ed3a --- /dev/null +++ b/src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["APIWebCrawlerDataSourceParam"] + + +class APIWebCrawlerDataSourceParam(TypedDict, total=False): + base_url: str + """The base url to crawl.""" + + crawling_option: Literal["UNKNOWN", "SCOPED", "PATH", "DOMAIN", "SUBDOMAINS"] + """Options for specifying how URLs found on pages should be handled. + + - UNKNOWN: Default unknown value + - SCOPED: Only include the base URL. + - PATH: Crawl the base URL and linked pages within the URL path. + - DOMAIN: Crawl the base URL and linked pages within the same domain. + - SUBDOMAINS: Crawl the base URL and linked pages for any subdomain. + """ + + embed_media: bool + """Whether to ingest and index media (images, etc.) on web pages.""" diff --git a/src/gradientai/types/knowledge_bases/data_source_create_params.py b/src/gradientai/types/knowledge_bases/data_source_create_params.py new file mode 100644 index 00000000..b1abafdf --- /dev/null +++ b/src/gradientai/types/knowledge_bases/data_source_create_params.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ..._utils import PropertyInfo +from .api_spaces_data_source_param import APISpacesDataSourceParam +from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam + +__all__ = ["DataSourceCreateParams", "AwsDataSource"] + + +class DataSourceCreateParams(TypedDict, total=False): + aws_data_source: AwsDataSource + + body_knowledge_base_uuid: Annotated[str, PropertyInfo(alias="knowledge_base_uuid")] + + spaces_data_source: APISpacesDataSourceParam + + web_crawler_data_source: APIWebCrawlerDataSourceParam + + +class AwsDataSource(TypedDict, total=False): + bucket_name: str + + item_path: str + + key_id: str + + region: str + + secret_key: str diff --git a/src/gradientai/types/knowledge_bases/data_source_create_response.py b/src/gradientai/types/knowledge_bases/data_source_create_response.py new file mode 100644 index 00000000..1035d3f4 --- /dev/null +++ b/src/gradientai/types/knowledge_bases/data_source_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource + +__all__ = ["DataSourceCreateResponse"] + + +class DataSourceCreateResponse(BaseModel): + knowledge_base_data_source: Optional[APIKnowledgeBaseDataSource] = None diff --git a/src/gradientai/types/knowledge_bases/data_source_list_params.py b/src/gradientai/types/knowledge_bases/data_source_list_params.py new file mode 100644 index 00000000..e3ed5e3c --- /dev/null +++ b/src/gradientai/types/knowledge_bases/data_source_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["DataSourceListParams"] + + +class DataSourceListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/gradientai/types/knowledge_bases/data_source_list_response.py b/src/gradientai/types/knowledge_bases/data_source_list_response.py new file mode 100644 index 00000000..78246ce1 --- /dev/null +++ b/src/gradientai/types/knowledge_bases/data_source_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from ..agents.api_meta import APIMeta +from ..agents.api_links import APILinks +from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource + +__all__ = ["DataSourceListResponse"] + + +class DataSourceListResponse(BaseModel): + knowledge_base_data_sources: Optional[List[APIKnowledgeBaseDataSource]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/tests/api_resources/knowledge_bases/__init__.py b/tests/api_resources/knowledge_bases/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/knowledge_bases/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py new file mode 100644 index 00000000..cc90a9d7 --- /dev/null +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -0,0 +1,269 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.knowledge_bases import ( + DataSourceListResponse, + DataSourceCreateResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestDataSources: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + data_source = client.knowledge_bases.data_sources.create( + path_knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + data_source = client.knowledge_bases.data_sources.create( + path_knowledge_base_uuid="knowledge_base_uuid", + aws_data_source={ + "bucket_name": "bucket_name", + "item_path": "item_path", + "key_id": "key_id", + "region": "region", + "secret_key": "secret_key", + }, + body_knowledge_base_uuid="knowledge_base_uuid", + spaces_data_source={ + "bucket_name": "bucket_name", + "item_path": "item_path", + "region": "region", + }, + web_crawler_data_source={ + "base_url": "base_url", + "crawling_option": "UNKNOWN", + "embed_media": True, + }, + ) + assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.knowledge_bases.data_sources.with_raw_response.create( + path_knowledge_base_uuid="knowledge_base_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = response.parse() + assert_matches_type(DataSourceCreateResponse, data_source, 
path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.knowledge_bases.data_sources.with_streaming_response.create( + path_knowledge_base_uuid="knowledge_base_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = response.parse() + assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: GradientAI) -> None: + with pytest.raises( + ValueError, match=r"Expected a non-empty value for `path_knowledge_base_uuid` but received ''" + ): + client.knowledge_bases.data_sources.with_raw_response.create( + path_knowledge_base_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + data_source = client.knowledge_bases.data_sources.list( + knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(DataSourceListResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + data_source = client.knowledge_bases.data_sources.list( + knowledge_base_uuid="knowledge_base_uuid", + page=0, + per_page=0, + ) + assert_matches_type(DataSourceListResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.knowledge_bases.data_sources.with_raw_response.list( + knowledge_base_uuid="knowledge_base_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = response.parse() + assert_matches_type(DataSourceListResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_streaming_response_list(self, client: GradientAI) -> None: + with client.knowledge_bases.data_sources.with_streaming_response.list( + knowledge_base_uuid="knowledge_base_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = response.parse() + assert_matches_type(DataSourceListResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + client.knowledge_bases.data_sources.with_raw_response.list( + knowledge_base_uuid="", + ) + + +class TestAsyncDataSources: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + data_source = await async_client.knowledge_bases.data_sources.create( + path_knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + data_source = await async_client.knowledge_bases.data_sources.create( + path_knowledge_base_uuid="knowledge_base_uuid", + aws_data_source={ + "bucket_name": "bucket_name", + "item_path": "item_path", + "key_id": "key_id", + "region": "region", + "secret_key": "secret_key", + }, + body_knowledge_base_uuid="knowledge_base_uuid", + spaces_data_source={ + "bucket_name": "bucket_name", + "item_path": "item_path", + "region": "region", + }, + web_crawler_data_source={ + "base_url": "base_url", + "crawling_option": "UNKNOWN", + "embed_media": True, + }, + ) + assert_matches_type(DataSourceCreateResponse, data_source, 
path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.knowledge_bases.data_sources.with_raw_response.create( + path_knowledge_base_uuid="knowledge_base_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = await response.parse() + assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.knowledge_bases.data_sources.with_streaming_response.create( + path_knowledge_base_uuid="knowledge_base_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = await response.parse() + assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: + with pytest.raises( + ValueError, match=r"Expected a non-empty value for `path_knowledge_base_uuid` but received ''" + ): + await async_client.knowledge_bases.data_sources.with_raw_response.create( + path_knowledge_base_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + data_source = await async_client.knowledge_bases.data_sources.list( + knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(DataSourceListResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + data_source = await async_client.knowledge_bases.data_sources.list( + knowledge_base_uuid="knowledge_base_uuid", + page=0, + 
per_page=0, + ) + assert_matches_type(DataSourceListResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.knowledge_bases.data_sources.with_raw_response.list( + knowledge_base_uuid="knowledge_base_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = await response.parse() + assert_matches_type(DataSourceListResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.knowledge_bases.data_sources.with_streaming_response.list( + knowledge_base_uuid="knowledge_base_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = await response.parse() + assert_matches_type(DataSourceListResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + await async_client.knowledge_bases.data_sources.with_raw_response.list( + knowledge_base_uuid="", + ) diff --git a/tests/api_resources/test_indexing_jobs.py b/tests/api_resources/test_indexing_jobs.py new file mode 100644 index 00000000..d44a75ae --- /dev/null +++ b/tests/api_resources/test_indexing_jobs.py @@ -0,0 +1,446 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types import ( + IndexingJobListResponse, + IndexingJobCreateResponse, + IndexingJobRetrieveResponse, + IndexingJobUpdateCancelResponse, + IndexingJobRetrieveDataSourcesResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestIndexingJobs: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + indexing_job = client.indexing_jobs.create() + assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + indexing_job = client.indexing_jobs.create( + data_source_uuids=["string"], + knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.indexing_jobs.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = response.parse() + assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.indexing_jobs.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = response.parse() + assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) + + assert cast(Any, 
response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + indexing_job = client.indexing_jobs.retrieve( + "uuid", + ) + assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.indexing_jobs.with_raw_response.retrieve( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = response.parse() + assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.indexing_jobs.with_streaming_response.retrieve( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = response.parse() + assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.indexing_jobs.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + indexing_job = client.indexing_jobs.list() + assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + indexing_job = client.indexing_jobs.list( + page=0, + per_page=0, + ) + assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, 
client: GradientAI) -> None: + response = client.indexing_jobs.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = response.parse() + assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.indexing_jobs.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = response.parse() + assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_data_sources(self, client: GradientAI) -> None: + indexing_job = client.indexing_jobs.retrieve_data_sources( + "indexing_job_uuid", + ) + assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve_data_sources(self, client: GradientAI) -> None: + response = client.indexing_jobs.with_raw_response.retrieve_data_sources( + "indexing_job_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = response.parse() + assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve_data_sources(self, client: GradientAI) -> None: + with client.indexing_jobs.with_streaming_response.retrieve_data_sources( + "indexing_job_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = response.parse() + assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, 
path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve_data_sources(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): + client.indexing_jobs.with_raw_response.retrieve_data_sources( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update_cancel(self, client: GradientAI) -> None: + indexing_job = client.indexing_jobs.update_cancel( + path_uuid="uuid", + ) + assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None: + indexing_job = client.indexing_jobs.update_cancel( + path_uuid="uuid", + body_uuid="uuid", + ) + assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update_cancel(self, client: GradientAI) -> None: + response = client.indexing_jobs.with_raw_response.update_cancel( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = response.parse() + assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update_cancel(self, client: GradientAI) -> None: + with client.indexing_jobs.with_streaming_response.update_cancel( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = response.parse() + assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update_cancel(self, client: GradientAI) -> 
None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + client.indexing_jobs.with_raw_response.update_cancel( + path_uuid="", + ) + + +class TestAsyncIndexingJobs: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + indexing_job = await async_client.indexing_jobs.create() + assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + indexing_job = await async_client.indexing_jobs.create( + data_source_uuids=["string"], + knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.indexing_jobs.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = await response.parse() + assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.indexing_jobs.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = await response.parse() + assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + indexing_job = 
await async_client.indexing_jobs.retrieve( + "uuid", + ) + assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.indexing_jobs.with_raw_response.retrieve( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = await response.parse() + assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.indexing_jobs.with_streaming_response.retrieve( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = await response.parse() + assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.indexing_jobs.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + indexing_job = await async_client.indexing_jobs.list() + assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + indexing_job = await async_client.indexing_jobs.list( + page=0, + per_page=0, + ) + assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.indexing_jobs.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = await response.parse() + assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.indexing_jobs.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = await response.parse() + assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: + indexing_job = await async_client.indexing_jobs.retrieve_data_sources( + "indexing_job_uuid", + ) + assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: + response = await async_client.indexing_jobs.with_raw_response.retrieve_data_sources( + "indexing_job_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = await response.parse() + assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: + async with async_client.indexing_jobs.with_streaming_response.retrieve_data_sources( + "indexing_job_uuid", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = await response.parse() + assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): + await async_client.indexing_jobs.with_raw_response.retrieve_data_sources( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None: + indexing_job = await async_client.indexing_jobs.update_cancel( + path_uuid="uuid", + ) + assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_cancel_with_all_params(self, async_client: AsyncGradientAI) -> None: + indexing_job = await async_client.indexing_jobs.update_cancel( + path_uuid="uuid", + body_uuid="uuid", + ) + assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) -> None: + response = await async_client.indexing_jobs.with_raw_response.update_cancel( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + indexing_job = await response.parse() + assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update_cancel(self, async_client: AsyncGradientAI) -> None: + async with async_client.indexing_jobs.with_streaming_response.update_cancel( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + indexing_job = await response.parse() + assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update_cancel(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + await async_client.indexing_jobs.with_raw_response.update_cancel( + path_uuid="", + ) diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py new file mode 100644 index 00000000..bf761cf2 --- /dev/null +++ b/tests/api_resources/test_knowledge_bases.py @@ -0,0 +1,227 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types import ( + KnowledgeBaseListResponse, + KnowledgeBaseCreateResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestKnowledgeBases: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + knowledge_base = client.knowledge_bases.create() + assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + knowledge_base = client.knowledge_bases.create( + database_id="database_id", + datasources=[ + { + "bucket_name": "bucket_name", + "bucket_region": "bucket_region", + "file_upload_data_source": { + "original_file_name": "original_file_name", + "size_in_bytes": 
"size_in_bytes", + "stored_object_key": "stored_object_key", + }, + "item_path": "item_path", + "spaces_data_source": { + "bucket_name": "bucket_name", + "item_path": "item_path", + "region": "region", + }, + "web_crawler_data_source": { + "base_url": "base_url", + "crawling_option": "UNKNOWN", + "embed_media": True, + }, + } + ], + embedding_model_uuid="embedding_model_uuid", + name="name", + project_id="project_id", + region="region", + tags=["string"], + vpc_uuid="vpc_uuid", + ) + assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.knowledge_bases.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.knowledge_bases.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + knowledge_base = client.knowledge_bases.list() + assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + knowledge_base = client.knowledge_bases.list( + page=0, + per_page=0, + ) + assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_raw_response_list(self, client: GradientAI) -> None: + response = client.knowledge_bases.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.knowledge_bases.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncKnowledgeBases: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + knowledge_base = await async_client.knowledge_bases.create() + assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + knowledge_base = await async_client.knowledge_bases.create( + database_id="database_id", + datasources=[ + { + "bucket_name": "bucket_name", + "bucket_region": "bucket_region", + "file_upload_data_source": { + "original_file_name": "original_file_name", + "size_in_bytes": "size_in_bytes", + "stored_object_key": "stored_object_key", + }, + "item_path": "item_path", + "spaces_data_source": { + "bucket_name": "bucket_name", + "item_path": "item_path", + "region": "region", + }, + "web_crawler_data_source": { + "base_url": "base_url", + "crawling_option": "UNKNOWN", + "embed_media": True, + }, + } + ], + 
embedding_model_uuid="embedding_model_uuid", + name="name", + project_id="project_id", + region="region", + tags=["string"], + vpc_uuid="vpc_uuid", + ) + assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.knowledge_bases.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.knowledge_bases.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + knowledge_base = await async_client.knowledge_bases.list() + assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + knowledge_base = await async_client.knowledge_bases.list( + page=0, + per_page=0, + ) + assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.knowledge_bases.with_raw_response.list() + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.knowledge_bases.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseListResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True From cac54a81a3f22d34b2de0ebfac3c68a982178cad Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 14 Jun 2025 05:44:37 +0000 Subject: [PATCH 10/41] feat(api): update via SDK Studio --- .stats.yml | 6 +- api.md | 145 +++- src/gradientai/_client.py | 161 ++++- src/gradientai/resources/__init__.py | 56 ++ src/gradientai/resources/agents/__init__.py | 14 + src/gradientai/resources/agents/agents.py | 497 ++++++++++++- .../resources/agents/child_agents.py | 508 ++++++++++++++ .../resources/agents/knowledge_bases.py | 181 +++++ src/gradientai/resources/api_keys/__init__.py | 19 + src/gradientai/resources/api_keys/api_keys.py | 275 ++++++++ .../resources/api_keys/api_keys_.py | 529 ++++++++++++++ src/gradientai/resources/auth/__init__.py | 33 + .../resources/auth/agents/__init__.py | 33 + .../resources/auth/agents/agents.py | 102 +++ src/gradientai/resources/auth/agents/token.py | 173 +++++ src/gradientai/resources/auth/auth.py | 102 +++ .../resources/knowledge_bases/data_sources.py | 91 +++ .../knowledge_bases/knowledge_bases.py | 291 +++++++- .../resources/providers/__init__.py | 47 ++ .../resources/providers/anthropic/__init__.py | 33 + .../providers/anthropic/anthropic.py | 102 +++ .../resources/providers/anthropic/keys.py | 662 
++++++++++++++++++ .../resources/providers/openai/__init__.py | 33 + .../resources/providers/openai/keys.py | 658 +++++++++++++++++ .../resources/providers/openai/openai.py | 102 +++ .../resources/providers/providers.py | 134 ++++ src/gradientai/resources/regions.py | 191 +++++ src/gradientai/types/__init__.py | 14 + src/gradientai/types/agent_delete_response.py | 16 + .../types/agent_retrieve_response.py | 16 + src/gradientai/types/agent_update_params.py | 65 ++ src/gradientai/types/agent_update_response.py | 16 + .../types/agent_update_status_params.py | 16 + .../types/agent_update_status_response.py | 16 + src/gradientai/types/agents/__init__.py | 7 + .../types/agents/child_agent_add_params.py | 22 + .../types/agents/child_agent_add_response.py | 14 + .../agents/child_agent_delete_response.py | 13 + .../types/agents/child_agent_update_params.py | 24 + .../agents/child_agent_update_response.py | 18 + .../types/agents/child_agent_view_response.py | 16 + .../agents/knowledge_base_detach_response.py | 16 + src/gradientai/types/api_key_list_params.py | 42 ++ src/gradientai/types/api_key_list_response.py | 42 ++ src/gradientai/types/api_keys/__init__.py | 10 + .../types/api_keys/api_key_create_params.py | 11 + .../types/api_keys/api_key_create_response.py | 12 + .../types/api_keys/api_key_delete_response.py | 12 + .../types/api_keys/api_key_list_params.py | 15 + .../types/api_keys/api_key_list_response.py | 18 + .../types/api_keys/api_key_update_params.py | 15 + .../api_key_update_regenerate_response.py | 12 + .../types/api_keys/api_key_update_response.py | 12 + .../types/api_keys/api_model_api_key_info.py | 22 + src/gradientai/types/auth/agents/__init__.py | 3 + .../types/auth/agents/token_create_params.py | 13 + .../auth/agents/token_create_response.py | 13 + .../types/knowledge_base_delete_response.py | 11 + .../types/knowledge_base_retrieve_response.py | 30 + .../types/knowledge_base_update_params.py | 27 + .../types/knowledge_base_update_response.py | 12 + 
.../types/knowledge_bases/__init__.py | 1 + .../data_source_delete_response.py | 13 + .../types/providers/anthropic/__init__.py | 11 + .../providers/anthropic/key_create_params.py | 13 + .../anthropic/key_create_response.py | 12 + .../anthropic/key_delete_response.py | 12 + .../anthropic/key_list_agents_params.py | 15 + .../anthropic/key_list_agents_response.py | 22 + .../providers/anthropic/key_list_params.py | 15 + .../providers/anthropic/key_list_response.py | 18 + .../anthropic/key_retrieve_response.py | 12 + .../providers/anthropic/key_update_params.py | 17 + .../anthropic/key_update_response.py | 12 + .../types/providers/openai/__init__.py | 11 + .../providers/openai/key_create_params.py | 13 + .../providers/openai/key_create_response.py | 12 + .../providers/openai/key_delete_response.py | 12 + .../types/providers/openai/key_list_params.py | 15 + .../providers/openai/key_list_response.py | 18 + .../openai/key_retrieve_agents_params.py | 15 + .../openai/key_retrieve_agents_response.py | 22 + .../providers/openai/key_retrieve_response.py | 12 + .../providers/openai/key_update_params.py | 17 + .../providers/openai/key_update_response.py | 12 + src/gradientai/types/region_list_params.py | 15 + src/gradientai/types/region_list_response.py | 23 + .../api_resources/agents/test_child_agents.py | 485 +++++++++++++ .../agents/test_knowledge_bases.py | 210 +++++- tests/api_resources/api_keys/__init__.py | 1 + .../api_resources/api_keys/test_api_keys_.py | 446 ++++++++++++ tests/api_resources/auth/__init__.py | 1 + tests/api_resources/auth/agents/__init__.py | 1 + tests/api_resources/auth/agents/test_token.py | 124 ++++ .../knowledge_bases/test_data_sources.py | 105 +++ tests/api_resources/providers/__init__.py | 1 + .../providers/anthropic/__init__.py | 1 + .../providers/anthropic/test_keys.py | 555 +++++++++++++++ .../providers/openai/__init__.py | 1 + .../providers/openai/test_keys.py | 555 +++++++++++++++ tests/api_resources/test_agents.py | 411 ++++++++++- 
tests/api_resources/test_api_keys.py | 100 +++ tests/api_resources/test_knowledge_bases.py | 283 ++++++++ tests/api_resources/test_regions.py | 96 +++ 104 files changed, 9534 insertions(+), 11 deletions(-) create mode 100644 src/gradientai/resources/agents/child_agents.py create mode 100644 src/gradientai/resources/api_keys/__init__.py create mode 100644 src/gradientai/resources/api_keys/api_keys.py create mode 100644 src/gradientai/resources/api_keys/api_keys_.py create mode 100644 src/gradientai/resources/auth/__init__.py create mode 100644 src/gradientai/resources/auth/agents/__init__.py create mode 100644 src/gradientai/resources/auth/agents/agents.py create mode 100644 src/gradientai/resources/auth/agents/token.py create mode 100644 src/gradientai/resources/auth/auth.py create mode 100644 src/gradientai/resources/providers/__init__.py create mode 100644 src/gradientai/resources/providers/anthropic/__init__.py create mode 100644 src/gradientai/resources/providers/anthropic/anthropic.py create mode 100644 src/gradientai/resources/providers/anthropic/keys.py create mode 100644 src/gradientai/resources/providers/openai/__init__.py create mode 100644 src/gradientai/resources/providers/openai/keys.py create mode 100644 src/gradientai/resources/providers/openai/openai.py create mode 100644 src/gradientai/resources/providers/providers.py create mode 100644 src/gradientai/resources/regions.py create mode 100644 src/gradientai/types/agent_delete_response.py create mode 100644 src/gradientai/types/agent_retrieve_response.py create mode 100644 src/gradientai/types/agent_update_params.py create mode 100644 src/gradientai/types/agent_update_response.py create mode 100644 src/gradientai/types/agent_update_status_params.py create mode 100644 src/gradientai/types/agent_update_status_response.py create mode 100644 src/gradientai/types/agents/child_agent_add_params.py create mode 100644 src/gradientai/types/agents/child_agent_add_response.py create mode 100644 
src/gradientai/types/agents/child_agent_delete_response.py create mode 100644 src/gradientai/types/agents/child_agent_update_params.py create mode 100644 src/gradientai/types/agents/child_agent_update_response.py create mode 100644 src/gradientai/types/agents/child_agent_view_response.py create mode 100644 src/gradientai/types/agents/knowledge_base_detach_response.py create mode 100644 src/gradientai/types/api_key_list_params.py create mode 100644 src/gradientai/types/api_key_list_response.py create mode 100644 src/gradientai/types/api_keys/api_key_create_params.py create mode 100644 src/gradientai/types/api_keys/api_key_create_response.py create mode 100644 src/gradientai/types/api_keys/api_key_delete_response.py create mode 100644 src/gradientai/types/api_keys/api_key_list_params.py create mode 100644 src/gradientai/types/api_keys/api_key_list_response.py create mode 100644 src/gradientai/types/api_keys/api_key_update_params.py create mode 100644 src/gradientai/types/api_keys/api_key_update_regenerate_response.py create mode 100644 src/gradientai/types/api_keys/api_key_update_response.py create mode 100644 src/gradientai/types/api_keys/api_model_api_key_info.py create mode 100644 src/gradientai/types/auth/agents/token_create_params.py create mode 100644 src/gradientai/types/auth/agents/token_create_response.py create mode 100644 src/gradientai/types/knowledge_base_delete_response.py create mode 100644 src/gradientai/types/knowledge_base_retrieve_response.py create mode 100644 src/gradientai/types/knowledge_base_update_params.py create mode 100644 src/gradientai/types/knowledge_base_update_response.py create mode 100644 src/gradientai/types/knowledge_bases/data_source_delete_response.py create mode 100644 src/gradientai/types/providers/anthropic/key_create_params.py create mode 100644 src/gradientai/types/providers/anthropic/key_create_response.py create mode 100644 src/gradientai/types/providers/anthropic/key_delete_response.py create mode 100644 
src/gradientai/types/providers/anthropic/key_list_agents_params.py create mode 100644 src/gradientai/types/providers/anthropic/key_list_agents_response.py create mode 100644 src/gradientai/types/providers/anthropic/key_list_params.py create mode 100644 src/gradientai/types/providers/anthropic/key_list_response.py create mode 100644 src/gradientai/types/providers/anthropic/key_retrieve_response.py create mode 100644 src/gradientai/types/providers/anthropic/key_update_params.py create mode 100644 src/gradientai/types/providers/anthropic/key_update_response.py create mode 100644 src/gradientai/types/providers/openai/key_create_params.py create mode 100644 src/gradientai/types/providers/openai/key_create_response.py create mode 100644 src/gradientai/types/providers/openai/key_delete_response.py create mode 100644 src/gradientai/types/providers/openai/key_list_params.py create mode 100644 src/gradientai/types/providers/openai/key_list_response.py create mode 100644 src/gradientai/types/providers/openai/key_retrieve_agents_params.py create mode 100644 src/gradientai/types/providers/openai/key_retrieve_agents_response.py create mode 100644 src/gradientai/types/providers/openai/key_retrieve_response.py create mode 100644 src/gradientai/types/providers/openai/key_update_params.py create mode 100644 src/gradientai/types/providers/openai/key_update_response.py create mode 100644 src/gradientai/types/region_list_params.py create mode 100644 src/gradientai/types/region_list_response.py create mode 100644 tests/api_resources/agents/test_child_agents.py create mode 100644 tests/api_resources/api_keys/__init__.py create mode 100644 tests/api_resources/api_keys/test_api_keys_.py create mode 100644 tests/api_resources/auth/__init__.py create mode 100644 tests/api_resources/auth/agents/__init__.py create mode 100644 tests/api_resources/auth/agents/test_token.py create mode 100644 tests/api_resources/providers/__init__.py create mode 100644 
tests/api_resources/providers/anthropic/__init__.py create mode 100644 tests/api_resources/providers/anthropic/test_keys.py create mode 100644 tests/api_resources/providers/openai/__init__.py create mode 100644 tests/api_resources/providers/openai/test_keys.py create mode 100644 tests/api_resources/test_api_keys.py create mode 100644 tests/api_resources/test_regions.py diff --git a/.stats.yml b/.stats.yml index 8135f5de..74cbd5c9 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 26 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-d8b53b5432334e3c25a01f8afa9cc6bb9213c8deb83721113ac48e0544a45c6a.yml -openapi_spec_hash: f6129f6ab890acc4ce6da26611b8fe67 +configured_endpoints: 60 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-6e449984986e066baea73af5c2726811e74a284f0d68d49926ec5c7821c7ed31.yml +openapi_spec_hash: 78f43f68f46df0d81891ae2ff66bf3a0 config_hash: 69dc66269416b2e01e8852b5a6788b97 diff --git a/api.md b/api.md index e8cac919..a3d3e8c1 100644 --- a/api.md +++ b/api.md @@ -12,14 +12,22 @@ from gradientai.types import ( APIOpenAIAPIKeyInfo, APIRetrievalMethod, AgentCreateResponse, + AgentRetrieveResponse, + AgentUpdateResponse, AgentListResponse, + AgentDeleteResponse, + AgentUpdateStatusResponse, ) ``` Methods: - client.agents.create(\*\*params) -> AgentCreateResponse +- client.agents.retrieve(uuid) -> AgentRetrieveResponse +- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse - client.agents.list(\*\*params) -> AgentListResponse +- client.agents.delete(uuid) -> AgentDeleteResponse +- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse ## APIKeys @@ -79,12 +87,116 @@ Methods: Types: ```python -from gradientai.types.agents import APILinkKnowledgeBaseOutput +from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse ``` 
Methods: - client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput +- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput +- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse + +## ChildAgents + +Types: + +```python +from gradientai.types.agents import ( + ChildAgentUpdateResponse, + ChildAgentDeleteResponse, + ChildAgentAddResponse, + ChildAgentViewResponse, +) +``` + +Methods: + +- client.agents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse +- client.agents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse +- client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse +- client.agents.child_agents.view(uuid) -> ChildAgentViewResponse + +# Providers + +## Anthropic + +### Keys + +Types: + +```python +from gradientai.types.providers.anthropic import ( + KeyCreateResponse, + KeyRetrieveResponse, + KeyUpdateResponse, + KeyListResponse, + KeyDeleteResponse, + KeyListAgentsResponse, +) +``` + +Methods: + +- client.providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse +- client.providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse +- client.providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse +- client.providers.anthropic.keys.list(\*\*params) -> KeyListResponse +- client.providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse +- client.providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse + +## OpenAI + +### Keys + +Types: + +```python +from gradientai.types.providers.openai import ( + KeyCreateResponse, + KeyRetrieveResponse, + KeyUpdateResponse, + KeyListResponse, + KeyDeleteResponse, + KeyRetrieveAgentsResponse, +) +``` + +Methods: + +- 
client.providers.openai.keys.create(\*\*params) -> KeyCreateResponse +- client.providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse +- client.providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse +- client.providers.openai.keys.list(\*\*params) -> KeyListResponse +- client.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse +- client.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse + +# Auth + +## Agents + +### Token + +Types: + +```python +from gradientai.types.auth.agents import TokenCreateResponse +``` + +Methods: + +- client.auth.agents.token.create(path_agent_uuid, \*\*params) -> TokenCreateResponse + +# Regions + +Types: + +```python +from gradientai.types import RegionListResponse +``` + +Methods: + +- client.regions.list(\*\*params) -> RegionListResponse # IndexingJobs @@ -117,14 +229,20 @@ Types: from gradientai.types import ( APIKnowledgeBase, KnowledgeBaseCreateResponse, + KnowledgeBaseRetrieveResponse, + KnowledgeBaseUpdateResponse, KnowledgeBaseListResponse, + KnowledgeBaseDeleteResponse, ) ``` Methods: - client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse +- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse +- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse - client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse +- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse ## DataSources @@ -138,6 +256,7 @@ from gradientai.types.knowledge_bases import ( APIWebCrawlerDataSource, DataSourceCreateResponse, DataSourceListResponse, + DataSourceDeleteResponse, ) ``` @@ -145,23 +264,43 @@ Methods: - client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse - client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse +- client.knowledge_bases.data_sources.delete(data_source_uuid, 
\*, knowledge_base_uuid) -> DataSourceDeleteResponse # APIKeys Types: ```python -from gradientai.types import APIAgreement, APIModelVersion +from gradientai.types import APIAgreement, APIModelVersion, APIKeyListResponse ``` +Methods: + +- client.api_keys.list(\*\*params) -> APIKeyListResponse + ## APIKeys Types: ```python -from gradientai.types.api_keys import APIModelAPIKeyInfo +from gradientai.types.api_keys import ( + APIModelAPIKeyInfo, + APIKeyCreateResponse, + APIKeyUpdateResponse, + APIKeyListResponse, + APIKeyDeleteResponse, + APIKeyUpdateRegenerateResponse, +) ``` +Methods: + +- client.api*keys.api_keys.create(\*\*params) -> APIKeyCreateResponse +- client.api*keys.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse +- client.api*keys.api_keys.list(\*\*params) -> APIKeyListResponse +- client.api*keys.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse +- client.api*keys.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse + # Chat Types: diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index ddf7beae..5c0172c1 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -31,12 +31,27 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, embeddings, indexing_jobs, knowledge_bases + from .resources import ( + auth, + chat, + agents, + models, + regions, + api_keys, + providers, + embeddings, + indexing_jobs, + knowledge_bases, + ) from .resources.chat import ChatResource, AsyncChatResource from .resources.models import ModelsResource, AsyncModelsResource + from .resources.regions import RegionsResource, AsyncRegionsResource + from .resources.auth.auth import AuthResource, AsyncAuthResource from .resources.embeddings import EmbeddingsResource, AsyncEmbeddingsResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource + from .resources.api_keys.api_keys import 
APIKeysResource, AsyncAPIKeysResource + from .resources.providers.providers import ProvidersResource, AsyncProvidersResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource __all__ = [ @@ -112,6 +127,24 @@ def agents(self) -> AgentsResource: return AgentsResource(self) + @cached_property + def providers(self) -> ProvidersResource: + from .resources.providers import ProvidersResource + + return ProvidersResource(self) + + @cached_property + def auth(self) -> AuthResource: + from .resources.auth import AuthResource + + return AuthResource(self) + + @cached_property + def regions(self) -> RegionsResource: + from .resources.regions import RegionsResource + + return RegionsResource(self) + @cached_property def indexing_jobs(self) -> IndexingJobsResource: from .resources.indexing_jobs import IndexingJobsResource @@ -124,6 +157,12 @@ def knowledge_bases(self) -> KnowledgeBasesResource: return KnowledgeBasesResource(self) + @cached_property + def api_keys(self) -> APIKeysResource: + from .resources.api_keys import APIKeysResource + + return APIKeysResource(self) + @cached_property def chat(self) -> ChatResource: from .resources.chat import ChatResource @@ -316,6 +355,24 @@ def agents(self) -> AsyncAgentsResource: return AsyncAgentsResource(self) + @cached_property + def providers(self) -> AsyncProvidersResource: + from .resources.providers import AsyncProvidersResource + + return AsyncProvidersResource(self) + + @cached_property + def auth(self) -> AsyncAuthResource: + from .resources.auth import AsyncAuthResource + + return AsyncAuthResource(self) + + @cached_property + def regions(self) -> AsyncRegionsResource: + from .resources.regions import AsyncRegionsResource + + return AsyncRegionsResource(self) + @cached_property def indexing_jobs(self) -> AsyncIndexingJobsResource: from .resources.indexing_jobs import AsyncIndexingJobsResource @@ -328,6 +385,12 @@ def knowledge_bases(self) -> AsyncKnowledgeBasesResource: 
return AsyncKnowledgeBasesResource(self) + @cached_property + def api_keys(self) -> AsyncAPIKeysResource: + from .resources.api_keys import AsyncAPIKeysResource + + return AsyncAPIKeysResource(self) + @cached_property def chat(self) -> AsyncChatResource: from .resources.chat import AsyncChatResource @@ -471,6 +534,24 @@ def agents(self) -> agents.AgentsResourceWithRawResponse: return AgentsResourceWithRawResponse(self._client.agents) + @cached_property + def providers(self) -> providers.ProvidersResourceWithRawResponse: + from .resources.providers import ProvidersResourceWithRawResponse + + return ProvidersResourceWithRawResponse(self._client.providers) + + @cached_property + def auth(self) -> auth.AuthResourceWithRawResponse: + from .resources.auth import AuthResourceWithRawResponse + + return AuthResourceWithRawResponse(self._client.auth) + + @cached_property + def regions(self) -> regions.RegionsResourceWithRawResponse: + from .resources.regions import RegionsResourceWithRawResponse + + return RegionsResourceWithRawResponse(self._client.regions) + @cached_property def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithRawResponse: from .resources.indexing_jobs import IndexingJobsResourceWithRawResponse @@ -483,6 +564,12 @@ def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawRespon return KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) + @cached_property + def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: + from .resources.api_keys import APIKeysResourceWithRawResponse + + return APIKeysResourceWithRawResponse(self._client.api_keys) + @cached_property def chat(self) -> chat.ChatResourceWithRawResponse: from .resources.chat import ChatResourceWithRawResponse @@ -514,6 +601,24 @@ def agents(self) -> agents.AsyncAgentsResourceWithRawResponse: return AsyncAgentsResourceWithRawResponse(self._client.agents) + @cached_property + def providers(self) -> providers.AsyncProvidersResourceWithRawResponse: + from 
.resources.providers import AsyncProvidersResourceWithRawResponse + + return AsyncProvidersResourceWithRawResponse(self._client.providers) + + @cached_property + def auth(self) -> auth.AsyncAuthResourceWithRawResponse: + from .resources.auth import AsyncAuthResourceWithRawResponse + + return AsyncAuthResourceWithRawResponse(self._client.auth) + + @cached_property + def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: + from .resources.regions import AsyncRegionsResourceWithRawResponse + + return AsyncRegionsResourceWithRawResponse(self._client.regions) + @cached_property def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithRawResponse: from .resources.indexing_jobs import AsyncIndexingJobsResourceWithRawResponse @@ -526,6 +631,12 @@ def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawR return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) + @cached_property + def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse: + from .resources.api_keys import AsyncAPIKeysResourceWithRawResponse + + return AsyncAPIKeysResourceWithRawResponse(self._client.api_keys) + @cached_property def chat(self) -> chat.AsyncChatResourceWithRawResponse: from .resources.chat import AsyncChatResourceWithRawResponse @@ -557,6 +668,24 @@ def agents(self) -> agents.AgentsResourceWithStreamingResponse: return AgentsResourceWithStreamingResponse(self._client.agents) + @cached_property + def providers(self) -> providers.ProvidersResourceWithStreamingResponse: + from .resources.providers import ProvidersResourceWithStreamingResponse + + return ProvidersResourceWithStreamingResponse(self._client.providers) + + @cached_property + def auth(self) -> auth.AuthResourceWithStreamingResponse: + from .resources.auth import AuthResourceWithStreamingResponse + + return AuthResourceWithStreamingResponse(self._client.auth) + + @cached_property + def regions(self) -> regions.RegionsResourceWithStreamingResponse: + from 
.resources.regions import RegionsResourceWithStreamingResponse + + return RegionsResourceWithStreamingResponse(self._client.regions) + @cached_property def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithStreamingResponse: from .resources.indexing_jobs import IndexingJobsResourceWithStreamingResponse @@ -569,6 +698,12 @@ def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreaming return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) + @cached_property + def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: + from .resources.api_keys import APIKeysResourceWithStreamingResponse + + return APIKeysResourceWithStreamingResponse(self._client.api_keys) + @cached_property def chat(self) -> chat.ChatResourceWithStreamingResponse: from .resources.chat import ChatResourceWithStreamingResponse @@ -600,6 +735,24 @@ def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse: return AsyncAgentsResourceWithStreamingResponse(self._client.agents) + @cached_property + def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse: + from .resources.providers import AsyncProvidersResourceWithStreamingResponse + + return AsyncProvidersResourceWithStreamingResponse(self._client.providers) + + @cached_property + def auth(self) -> auth.AsyncAuthResourceWithStreamingResponse: + from .resources.auth import AsyncAuthResourceWithStreamingResponse + + return AsyncAuthResourceWithStreamingResponse(self._client.auth) + + @cached_property + def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: + from .resources.regions import AsyncRegionsResourceWithStreamingResponse + + return AsyncRegionsResourceWithStreamingResponse(self._client.regions) + @cached_property def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithStreamingResponse: from .resources.indexing_jobs import AsyncIndexingJobsResourceWithStreamingResponse @@ -612,6 +765,12 @@ def knowledge_bases(self) -> 
knowledge_bases.AsyncKnowledgeBasesResourceWithStre return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) + @cached_property + def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: + from .resources.api_keys import AsyncAPIKeysResourceWithStreamingResponse + + return AsyncAPIKeysResourceWithStreamingResponse(self._client.api_keys) + @cached_property def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: from .resources.chat import AsyncChatResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 15e90bdb..6dcbff02 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -1,5 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from .auth import ( + AuthResource, + AsyncAuthResource, + AuthResourceWithRawResponse, + AsyncAuthResourceWithRawResponse, + AuthResourceWithStreamingResponse, + AsyncAuthResourceWithStreamingResponse, +) from .chat import ( ChatResource, AsyncChatResource, @@ -24,6 +32,30 @@ ModelsResourceWithStreamingResponse, AsyncModelsResourceWithStreamingResponse, ) +from .regions import ( + RegionsResource, + AsyncRegionsResource, + RegionsResourceWithRawResponse, + AsyncRegionsResourceWithRawResponse, + RegionsResourceWithStreamingResponse, + AsyncRegionsResourceWithStreamingResponse, +) +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) +from .providers import ( + ProvidersResource, + AsyncProvidersResource, + ProvidersResourceWithRawResponse, + AsyncProvidersResourceWithRawResponse, + ProvidersResourceWithStreamingResponse, + AsyncProvidersResourceWithStreamingResponse, +) from .embeddings import ( EmbeddingsResource, AsyncEmbeddingsResource, @@ -56,6 +88,24 @@ 
"AsyncAgentsResourceWithRawResponse", "AgentsResourceWithStreamingResponse", "AsyncAgentsResourceWithStreamingResponse", + "ProvidersResource", + "AsyncProvidersResource", + "ProvidersResourceWithRawResponse", + "AsyncProvidersResourceWithRawResponse", + "ProvidersResourceWithStreamingResponse", + "AsyncProvidersResourceWithStreamingResponse", + "AuthResource", + "AsyncAuthResource", + "AuthResourceWithRawResponse", + "AsyncAuthResourceWithRawResponse", + "AuthResourceWithStreamingResponse", + "AsyncAuthResourceWithStreamingResponse", + "RegionsResource", + "AsyncRegionsResource", + "RegionsResourceWithRawResponse", + "AsyncRegionsResourceWithRawResponse", + "RegionsResourceWithStreamingResponse", + "AsyncRegionsResourceWithStreamingResponse", "IndexingJobsResource", "AsyncIndexingJobsResource", "IndexingJobsResourceWithRawResponse", @@ -68,6 +118,12 @@ "AsyncKnowledgeBasesResourceWithRawResponse", "KnowledgeBasesResourceWithStreamingResponse", "AsyncKnowledgeBasesResourceWithStreamingResponse", + "APIKeysResource", + "AsyncAPIKeysResource", + "APIKeysResourceWithRawResponse", + "AsyncAPIKeysResourceWithRawResponse", + "APIKeysResourceWithStreamingResponse", + "AsyncAPIKeysResourceWithStreamingResponse", "ChatResource", "AsyncChatResource", "ChatResourceWithRawResponse", diff --git a/src/gradientai/resources/agents/__init__.py b/src/gradientai/resources/agents/__init__.py index 5bdea838..f41a0408 100644 --- a/src/gradientai/resources/agents/__init__.py +++ b/src/gradientai/resources/agents/__init__.py @@ -32,6 +32,14 @@ FunctionsResourceWithStreamingResponse, AsyncFunctionsResourceWithStreamingResponse, ) +from .child_agents import ( + ChildAgentsResource, + AsyncChildAgentsResource, + ChildAgentsResourceWithRawResponse, + AsyncChildAgentsResourceWithRawResponse, + ChildAgentsResourceWithStreamingResponse, + AsyncChildAgentsResourceWithStreamingResponse, +) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -66,6 +74,12 @@ 
"AsyncKnowledgeBasesResourceWithRawResponse", "KnowledgeBasesResourceWithStreamingResponse", "AsyncKnowledgeBasesResourceWithStreamingResponse", + "ChildAgentsResource", + "AsyncChildAgentsResource", + "ChildAgentsResourceWithRawResponse", + "AsyncChildAgentsResourceWithRawResponse", + "ChildAgentsResourceWithStreamingResponse", + "AsyncChildAgentsResourceWithStreamingResponse", "AgentsResource", "AsyncAgentsResource", "AgentsResourceWithRawResponse", diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index 38444f9c..87e2aeca 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -6,7 +6,14 @@ import httpx -from ...types import agent_list_params, agent_create_params +from ...types import ( + APIRetrievalMethod, + APIDeploymentVisibility, + agent_list_params, + agent_create_params, + agent_update_params, + agent_update_status_params, +) from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ..._utils import maybe_transform, async_maybe_transform from .api_keys import ( @@ -41,6 +48,14 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) +from .child_agents import ( + ChildAgentsResource, + AsyncChildAgentsResource, + ChildAgentsResourceWithRawResponse, + AsyncChildAgentsResourceWithRawResponse, + ChildAgentsResourceWithStreamingResponse, + AsyncChildAgentsResourceWithStreamingResponse, +) from ..._base_client import make_request_options from .knowledge_bases import ( KnowledgeBasesResource, @@ -51,7 +66,13 @@ AsyncKnowledgeBasesResourceWithStreamingResponse, ) from ...types.agent_list_response import AgentListResponse +from ...types.api_retrieval_method import APIRetrievalMethod from ...types.agent_create_response import AgentCreateResponse +from ...types.agent_delete_response import AgentDeleteResponse +from ...types.agent_update_response import AgentUpdateResponse +from ...types.agent_retrieve_response import AgentRetrieveResponse 
+from ...types.api_deployment_visibility import APIDeploymentVisibility +from ...types.agent_update_status_response import AgentUpdateStatusResponse __all__ = ["AgentsResource", "AsyncAgentsResource"] @@ -73,6 +94,10 @@ def versions(self) -> VersionsResource: def knowledge_bases(self) -> KnowledgeBasesResource: return KnowledgeBasesResource(self._client) + @cached_property + def child_agents(self) -> ChildAgentsResource: + return ChildAgentsResource(self._client) + @cached_property def with_raw_response(self) -> AgentsResourceWithRawResponse: """ @@ -156,6 +181,130 @@ def create( cast_to=AgentCreateResponse, ) + def retrieve( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentRetrieveResponse: + """To retrieve details of an agent, GET request to `/v2/gen-ai/agents/{uuid}`. + + The + response body is a JSON object containing the agent. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/gen-ai/agents/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentRetrieveResponse, + ) + + def update( + self, + path_uuid: str, + *, + anthropic_key_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + instruction: str | NotGiven = NOT_GIVEN, + k: int | NotGiven = NOT_GIVEN, + max_tokens: int | NotGiven = NOT_GIVEN, + model_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + openai_key_uuid: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + provide_citations: bool | NotGiven = NOT_GIVEN, + retrieval_method: APIRetrievalMethod | NotGiven = NOT_GIVEN, + tags: List[str] | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + top_p: float | NotGiven = NOT_GIVEN, + body_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentUpdateResponse: + """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. + + The + response body is a JSON object containing the agent. + + Args: + instruction: Agent instruction. Instructions help your agent to perform its job effectively. 
+ See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + + max_tokens: Specifies the maximum number of tokens the model can process in a single input + or output, set as a number between 1 and 512. This determines the length of each + response. + + model_uuid: Identifier for the foundation model. + + temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower + values produce more predictable and conservative responses, while higher values + encourage creativity and variation. + + top_p: Defines the cumulative probability threshold for word selection, specified as a + number between 0 and 1. Higher values allow for more diverse outputs, while + lower values ensure focused and coherent responses. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return self._put( + f"/v2/gen-ai/agents/{path_uuid}", + body=maybe_transform( + { + "anthropic_key_uuid": anthropic_key_uuid, + "description": description, + "instruction": instruction, + "k": k, + "max_tokens": max_tokens, + "model_uuid": model_uuid, + "name": name, + "openai_key_uuid": openai_key_uuid, + "project_id": project_id, + "provide_citations": provide_citations, + "retrieval_method": retrieval_method, + "tags": tags, + "temperature": temperature, + "top_p": top_p, + "body_uuid": body_uuid, + }, + agent_update_params.AgentUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentUpdateResponse, + ) + def list( self, *, @@ -206,6 +355,83 @@ def list( 
cast_to=AgentListResponse, ) + def delete( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentDeleteResponse: + """ + To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._delete( + f"/v2/gen-ai/agents/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentDeleteResponse, + ) + + def update_status( + self, + path_uuid: str, + *, + body_uuid: str | NotGiven = NOT_GIVEN, + visibility: APIDeploymentVisibility | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentUpdateStatusResponse: + """Check whether an agent is public or private. + + To update the agent status, send a + PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return self._put( + f"/v2/gen-ai/agents/{path_uuid}/deployment_visibility", + body=maybe_transform( + { + "body_uuid": body_uuid, + "visibility": visibility, + }, + agent_update_status_params.AgentUpdateStatusParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentUpdateStatusResponse, + ) + class AsyncAgentsResource(AsyncAPIResource): @cached_property @@ -224,6 +450,10 @@ def versions(self) -> AsyncVersionsResource: def knowledge_bases(self) -> AsyncKnowledgeBasesResource: return AsyncKnowledgeBasesResource(self._client) + @cached_property + def child_agents(self) -> AsyncChildAgentsResource: + return AsyncChildAgentsResource(self._client) + @cached_property def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: """ @@ -307,6 +537,130 @@ async def create( cast_to=AgentCreateResponse, ) + async def retrieve( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentRetrieveResponse: + """To retrieve details of an agent, GET request to `/v2/gen-ai/agents/{uuid}`. + + The + response body is a JSON object containing the agent. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/gen-ai/agents/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentRetrieveResponse, + ) + + async def update( + self, + path_uuid: str, + *, + anthropic_key_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + instruction: str | NotGiven = NOT_GIVEN, + k: int | NotGiven = NOT_GIVEN, + max_tokens: int | NotGiven = NOT_GIVEN, + model_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + openai_key_uuid: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + provide_citations: bool | NotGiven = NOT_GIVEN, + retrieval_method: APIRetrievalMethod | NotGiven = NOT_GIVEN, + tags: List[str] | NotGiven = NOT_GIVEN, + temperature: float | NotGiven = NOT_GIVEN, + top_p: float | NotGiven = NOT_GIVEN, + body_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentUpdateResponse: + """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. + + The + response body is a JSON object containing the agent. + + Args: + instruction: Agent instruction. Instructions help your agent to perform its job effectively. 
+ See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + + max_tokens: Specifies the maximum number of tokens the model can process in a single input + or output, set as a number between 1 and 512. This determines the length of each + response. + + model_uuid: Identifier for the foundation model. + + temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower + values produce more predictable and conservative responses, while higher values + encourage creativity and variation. + + top_p: Defines the cumulative probability threshold for word selection, specified as a + number between 0 and 1. Higher values allow for more diverse outputs, while + lower values ensure focused and coherent responses. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return await self._put( + f"/v2/gen-ai/agents/{path_uuid}", + body=await async_maybe_transform( + { + "anthropic_key_uuid": anthropic_key_uuid, + "description": description, + "instruction": instruction, + "k": k, + "max_tokens": max_tokens, + "model_uuid": model_uuid, + "name": name, + "openai_key_uuid": openai_key_uuid, + "project_id": project_id, + "provide_citations": provide_citations, + "retrieval_method": retrieval_method, + "tags": tags, + "temperature": temperature, + "top_p": top_p, + "body_uuid": body_uuid, + }, + agent_update_params.AgentUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentUpdateResponse, + ) + async def list( self, *, @@ -357,6 
+711,83 @@ async def list( cast_to=AgentListResponse, ) + async def delete( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentDeleteResponse: + """ + To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._delete( + f"/v2/gen-ai/agents/{uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentDeleteResponse, + ) + + async def update_status( + self, + path_uuid: str, + *, + body_uuid: str | NotGiven = NOT_GIVEN, + visibility: APIDeploymentVisibility | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AgentUpdateStatusResponse: + """Check whether an agent is public or private. + + To update the agent status, send a + PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_uuid: + raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") + return await self._put( + f"/v2/gen-ai/agents/{path_uuid}/deployment_visibility", + body=await async_maybe_transform( + { + "body_uuid": body_uuid, + "visibility": visibility, + }, + agent_update_status_params.AgentUpdateStatusParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AgentUpdateStatusResponse, + ) + class AgentsResourceWithRawResponse: def __init__(self, agents: AgentsResource) -> None: @@ -365,9 +796,21 @@ def __init__(self, agents: AgentsResource) -> None: self.create = to_raw_response_wrapper( agents.create, ) + self.retrieve = to_raw_response_wrapper( + agents.retrieve, + ) + self.update = to_raw_response_wrapper( + agents.update, + ) self.list = to_raw_response_wrapper( agents.list, ) + self.delete = to_raw_response_wrapper( + agents.delete, + ) + self.update_status = to_raw_response_wrapper( + agents.update_status, + ) @cached_property def api_keys(self) -> APIKeysResourceWithRawResponse: @@ -385,6 +828,10 @@ def versions(self) -> VersionsResourceWithRawResponse: def knowledge_bases(self) -> KnowledgeBasesResourceWithRawResponse: return KnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) + @cached_property + def child_agents(self) -> ChildAgentsResourceWithRawResponse: + return ChildAgentsResourceWithRawResponse(self._agents.child_agents) + class AsyncAgentsResourceWithRawResponse: def __init__(self, agents: AsyncAgentsResource) -> None: @@ -393,9 +840,21 @@ def __init__(self, agents: AsyncAgentsResource) -> None: self.create = async_to_raw_response_wrapper( 
agents.create, ) + self.retrieve = async_to_raw_response_wrapper( + agents.retrieve, + ) + self.update = async_to_raw_response_wrapper( + agents.update, + ) self.list = async_to_raw_response_wrapper( agents.list, ) + self.delete = async_to_raw_response_wrapper( + agents.delete, + ) + self.update_status = async_to_raw_response_wrapper( + agents.update_status, + ) @cached_property def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: @@ -413,6 +872,10 @@ def versions(self) -> AsyncVersionsResourceWithRawResponse: def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithRawResponse: return AsyncKnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) + @cached_property + def child_agents(self) -> AsyncChildAgentsResourceWithRawResponse: + return AsyncChildAgentsResourceWithRawResponse(self._agents.child_agents) + class AgentsResourceWithStreamingResponse: def __init__(self, agents: AgentsResource) -> None: @@ -421,9 +884,21 @@ def __init__(self, agents: AgentsResource) -> None: self.create = to_streamed_response_wrapper( agents.create, ) + self.retrieve = to_streamed_response_wrapper( + agents.retrieve, + ) + self.update = to_streamed_response_wrapper( + agents.update, + ) self.list = to_streamed_response_wrapper( agents.list, ) + self.delete = to_streamed_response_wrapper( + agents.delete, + ) + self.update_status = to_streamed_response_wrapper( + agents.update_status, + ) @cached_property def api_keys(self) -> APIKeysResourceWithStreamingResponse: @@ -441,6 +916,10 @@ def versions(self) -> VersionsResourceWithStreamingResponse: def knowledge_bases(self) -> KnowledgeBasesResourceWithStreamingResponse: return KnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) + @cached_property + def child_agents(self) -> ChildAgentsResourceWithStreamingResponse: + return ChildAgentsResourceWithStreamingResponse(self._agents.child_agents) + class AsyncAgentsResourceWithStreamingResponse: def __init__(self, agents: AsyncAgentsResource) -> None: 
@@ -449,9 +928,21 @@ def __init__(self, agents: AsyncAgentsResource) -> None: self.create = async_to_streamed_response_wrapper( agents.create, ) + self.retrieve = async_to_streamed_response_wrapper( + agents.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + agents.update, + ) self.list = async_to_streamed_response_wrapper( agents.list, ) + self.delete = async_to_streamed_response_wrapper( + agents.delete, + ) + self.update_status = async_to_streamed_response_wrapper( + agents.update_status, + ) @cached_property def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: @@ -468,3 +959,7 @@ def versions(self) -> AsyncVersionsResourceWithStreamingResponse: @cached_property def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: return AsyncKnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) + + @cached_property + def child_agents(self) -> AsyncChildAgentsResourceWithStreamingResponse: + return AsyncChildAgentsResourceWithStreamingResponse(self._agents.child_agents) diff --git a/src/gradientai/resources/agents/child_agents.py b/src/gradientai/resources/agents/child_agents.py new file mode 100644 index 00000000..1f7fe3ce --- /dev/null +++ b/src/gradientai/resources/agents/child_agents.py @@ -0,0 +1,508 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.agents import child_agent_add_params, child_agent_update_params +from ...types.agents.child_agent_add_response import ChildAgentAddResponse +from ...types.agents.child_agent_view_response import ChildAgentViewResponse +from ...types.agents.child_agent_delete_response import ChildAgentDeleteResponse +from ...types.agents.child_agent_update_response import ChildAgentUpdateResponse + +__all__ = ["ChildAgentsResource", "AsyncChildAgentsResource"] + + +class ChildAgentsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ChildAgentsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ChildAgentsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ChildAgentsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ChildAgentsResourceWithStreamingResponse(self) + + def update( + self, + path_child_agent_uuid: str, + *, + path_parent_agent_uuid: str, + body_child_agent_uuid: str | NotGiven = NOT_GIVEN, + if_case: str | NotGiven = NOT_GIVEN, + body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, + route_name: str | NotGiven = NOT_GIVEN, + uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentUpdateResponse: + """ + To update an agent route for an agent, send a PUT request to + `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. + + Args: + body_parent_agent_uuid: A unique identifier for the parent agent. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_parent_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" + ) + if not path_child_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" + ) + return self._put( + f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", + body=maybe_transform( + { + "body_child_agent_uuid": body_child_agent_uuid, + "if_case": if_case, + "body_parent_agent_uuid": body_parent_agent_uuid, + "route_name": route_name, + "uuid": uuid, + }, + child_agent_update_params.ChildAgentUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentUpdateResponse, + ) + + def delete( + self, + child_agent_uuid: str, + *, + parent_agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentDeleteResponse: + """ + To delete an agent route from a parent agent, send a DELETE request to + `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not parent_agent_uuid: + raise ValueError(f"Expected a non-empty value for `parent_agent_uuid` but received {parent_agent_uuid!r}") + if not child_agent_uuid: + raise ValueError(f"Expected a non-empty value for `child_agent_uuid` but received {child_agent_uuid!r}") + return self._delete( + f"/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentDeleteResponse, + ) + + def add( + self, + path_child_agent_uuid: str, + *, + path_parent_agent_uuid: str, + body_child_agent_uuid: str | NotGiven = NOT_GIVEN, + if_case: str | NotGiven = NOT_GIVEN, + body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, + route_name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentAddResponse: + """ + To add an agent route to an agent, send a POST request to + `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. + + Args: + body_parent_agent_uuid: A unique identifier for the parent agent. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_parent_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" + ) + if not path_child_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" + ) + return self._post( + f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", + body=maybe_transform( + { + "body_child_agent_uuid": body_child_agent_uuid, + "if_case": if_case, + "body_parent_agent_uuid": body_parent_agent_uuid, + "route_name": route_name, + }, + child_agent_add_params.ChildAgentAddParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentAddResponse, + ) + + def view( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentViewResponse: + """ + To view agent routes for an agent, send a GET request to + `/v2/gen-ai/agents/{uuid}/child_agents`.
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/gen-ai/agents/{uuid}/child_agents", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentViewResponse, + ) + + +class AsyncChildAgentsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncChildAgentsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncChildAgentsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncChildAgentsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncChildAgentsResourceWithStreamingResponse(self) + + async def update( + self, + path_child_agent_uuid: str, + *, + path_parent_agent_uuid: str, + body_child_agent_uuid: str | NotGiven = NOT_GIVEN, + if_case: str | NotGiven = NOT_GIVEN, + body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, + route_name: str | NotGiven = NOT_GIVEN, + uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentUpdateResponse: + """ + To update an agent route for an agent, send a PUT request to + `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. + + Args: + body_parent_agent_uuid: A unique identifier for the parent agent. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_parent_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" + ) + if not path_child_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" + ) + return await self._put( + f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", + body=await async_maybe_transform( + { + "body_child_agent_uuid": body_child_agent_uuid, + "if_case": if_case, + "body_parent_agent_uuid": body_parent_agent_uuid, + "route_name": route_name, + "uuid": uuid, + }, + child_agent_update_params.ChildAgentUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentUpdateResponse, + ) + + async def delete( + self, + child_agent_uuid: str, + *, + parent_agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentDeleteResponse: + """ + To delete an agent route from a parent agent, send a DELETE request to + `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not parent_agent_uuid: + raise ValueError(f"Expected a non-empty value for `parent_agent_uuid` but received {parent_agent_uuid!r}") + if not child_agent_uuid: + raise ValueError(f"Expected a non-empty value for `child_agent_uuid` but received {child_agent_uuid!r}") + return await self._delete( + f"/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentDeleteResponse, + ) + + async def add( + self, + path_child_agent_uuid: str, + *, + path_parent_agent_uuid: str, + body_child_agent_uuid: str | NotGiven = NOT_GIVEN, + if_case: str | NotGiven = NOT_GIVEN, + body_parent_agent_uuid: str | NotGiven = NOT_GIVEN, + route_name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentAddResponse: + """ + To add an agent route to an agent, send a POST request to + `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. + + Args: + body_parent_agent_uuid: A unique identifier for the parent agent. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_parent_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_parent_agent_uuid` but received {path_parent_agent_uuid!r}" + ) + if not path_child_agent_uuid: + raise ValueError( + f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" + ) + return await self._post( + f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", + body=await async_maybe_transform( + { + "body_child_agent_uuid": body_child_agent_uuid, + "if_case": if_case, + "body_parent_agent_uuid": body_parent_agent_uuid, + "route_name": route_name, + }, + child_agent_add_params.ChildAgentAddParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentAddResponse, + ) + + async def view( + self, + uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ChildAgentViewResponse: + """ + To view agent routes for an agent, send a GET request to + `/v2/gen-ai/agents/{uuid}/child_agents`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/gen-ai/agents/{uuid}/child_agents", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ChildAgentViewResponse, + ) + + +class ChildAgentsResourceWithRawResponse: + def __init__(self, child_agents: ChildAgentsResource) -> None: + self._child_agents = child_agents + + self.update = to_raw_response_wrapper( + child_agents.update, + ) + self.delete = to_raw_response_wrapper( + child_agents.delete, + ) + self.add = to_raw_response_wrapper( + child_agents.add, + ) + self.view = to_raw_response_wrapper( + child_agents.view, + ) + + +class AsyncChildAgentsResourceWithRawResponse: + def __init__(self, child_agents: AsyncChildAgentsResource) -> None: + self._child_agents = child_agents + + self.update = async_to_raw_response_wrapper( + child_agents.update, + ) + self.delete = async_to_raw_response_wrapper( + child_agents.delete, + ) + self.add = async_to_raw_response_wrapper( + child_agents.add, + ) + self.view = async_to_raw_response_wrapper( + child_agents.view, + ) + + +class ChildAgentsResourceWithStreamingResponse: + def __init__(self, child_agents: ChildAgentsResource) -> None: + self._child_agents = child_agents + + self.update = to_streamed_response_wrapper( + child_agents.update, +
) + self.delete = to_streamed_response_wrapper( + child_agents.delete, + ) + self.add = to_streamed_response_wrapper( + child_agents.add, + ) + self.view = to_streamed_response_wrapper( + child_agents.view, + ) + + +class AsyncChildAgentsResourceWithStreamingResponse: + def __init__(self, child_agents: AsyncChildAgentsResource) -> None: + self._child_agents = child_agents + + self.update = async_to_streamed_response_wrapper( + child_agents.update, + ) + self.delete = async_to_streamed_response_wrapper( + child_agents.delete, + ) + self.add = async_to_streamed_response_wrapper( + child_agents.add, + ) + self.view = async_to_streamed_response_wrapper( + child_agents.view, + ) diff --git a/src/gradientai/resources/agents/knowledge_bases.py b/src/gradientai/resources/agents/knowledge_bases.py index 3d65228a..97b086e0 100644 --- a/src/gradientai/resources/agents/knowledge_bases.py +++ b/src/gradientai/resources/agents/knowledge_bases.py @@ -15,6 +15,7 @@ ) from ..._base_client import make_request_options from ...types.agents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput +from ...types.agents.knowledge_base_detach_response import KnowledgeBaseDetachResponse __all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"] @@ -73,6 +74,84 @@ def attach( cast_to=APILinkKnowledgeBaseOutput, ) + def attach_single( + self, + knowledge_base_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APILinkKnowledgeBaseOutput: + """ + To attach a knowledge base to an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}` + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return self._post( + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APILinkKnowledgeBaseOutput, + ) + + def detach( + self, + knowledge_base_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KnowledgeBaseDetachResponse: + """ + To detach a knowledge base from an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return self._delete( + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KnowledgeBaseDetachResponse, + ) + class AsyncKnowledgeBasesResource(AsyncAPIResource): @cached_property @@ -128,6 +207,84 @@ async def attach( cast_to=APILinkKnowledgeBaseOutput, ) + async def attach_single( + self, + knowledge_base_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APILinkKnowledgeBaseOutput: + """ + To attach a knowledge base to an agent, send a POST request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}` + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return await self._post( + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APILinkKnowledgeBaseOutput, + ) + + async def detach( + self, + knowledge_base_uuid: str, + *, + agent_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KnowledgeBaseDetachResponse: + """ + To detach a knowledge base from an agent, send a DELETE request to + `/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not agent_uuid: + raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") + if not knowledge_base_uuid: + raise ValueError( + f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" + ) + return await self._delete( + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KnowledgeBaseDetachResponse, + ) + class KnowledgeBasesResourceWithRawResponse: def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: @@ -136,6 +293,12 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: self.attach = to_raw_response_wrapper( knowledge_bases.attach, ) + self.attach_single = to_raw_response_wrapper( + knowledge_bases.attach_single, + ) + self.detach = to_raw_response_wrapper( + knowledge_bases.detach, + ) class AsyncKnowledgeBasesResourceWithRawResponse: @@ -145,6 +308,12 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: self.attach = async_to_raw_response_wrapper( knowledge_bases.attach, ) + self.attach_single = async_to_raw_response_wrapper( + knowledge_bases.attach_single, + ) + self.detach = async_to_raw_response_wrapper( + knowledge_bases.detach, + ) class KnowledgeBasesResourceWithStreamingResponse: @@ -154,6 +323,12 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: self.attach = to_streamed_response_wrapper( knowledge_bases.attach, ) + self.attach_single = to_streamed_response_wrapper( + knowledge_bases.attach_single, + ) + self.detach = to_streamed_response_wrapper( + knowledge_bases.detach, + ) class 
AsyncKnowledgeBasesResourceWithStreamingResponse: @@ -163,3 +338,9 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: self.attach = async_to_streamed_response_wrapper( knowledge_bases.attach, ) + self.attach_single = async_to_streamed_response_wrapper( + knowledge_bases.attach_single, + ) + self.detach = async_to_streamed_response_wrapper( + knowledge_bases.detach, + ) diff --git a/src/gradientai/resources/api_keys/__init__.py b/src/gradientai/resources/api_keys/__init__.py new file mode 100644 index 00000000..ed14565c --- /dev/null +++ b/src/gradientai/resources/api_keys/__init__.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) + +__all__ = [ + "APIKeysResource", + "AsyncAPIKeysResource", + "APIKeysResourceWithRawResponse", + "AsyncAPIKeysResourceWithRawResponse", + "APIKeysResourceWithStreamingResponse", + "AsyncAPIKeysResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/api_keys/api_keys.py b/src/gradientai/resources/api_keys/api_keys.py new file mode 100644 index 00000000..aecccfc3 --- /dev/null +++ b/src/gradientai/resources/api_keys/api_keys.py @@ -0,0 +1,275 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal + +import httpx + +from . 
import api_keys_ as api_keys +from ...types import api_key_list_params +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.api_key_list_response import APIKeyListResponse + +__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] + + +class APIKeysResource(SyncAPIResource): + @cached_property + def api_keys(self) -> api_keys.APIKeysResource: + return api_keys.APIKeysResource(self._client) + + @cached_property + def with_raw_response(self) -> APIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return APIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return APIKeysResourceWithStreamingResponse(self) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + public_only: bool | NotGiven = NOT_GIVEN, + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyListResponse: + """ + To list all models, send a GET request to `/v2/gen-ai/models`. + + Args: + page: page number. + + per_page: items per page. + + public_only: only include models that are publicly available. + + usecases: include only models defined for the listed usecases. 
+ + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model maybe used in an agent + - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/models", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + "public_only": public_only, + "usecases": usecases, + }, + api_key_list_params.APIKeyListParams, + ), + ), + cast_to=APIKeyListResponse, + ) + + +class AsyncAPIKeysResource(AsyncAPIResource): + @cached_property + def api_keys(self) -> api_keys.AsyncAPIKeysResource: + return api_keys.AsyncAPIKeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncAPIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncAPIKeysResourceWithStreamingResponse(self) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + public_only: bool | NotGiven = NOT_GIVEN, + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyListResponse: + """ + To list all models, send a GET request to `/v2/gen-ai/models`. + + Args: + page: page number. + + per_page: items per page. + + public_only: only include models that are publicly available. + + usecases: include only models defined for the listed usecases. 
+ + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model maybe used in an agent + - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/models", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + "public_only": public_only, + "usecases": usecases, + }, + api_key_list_params.APIKeyListParams, + ), + ), + cast_to=APIKeyListResponse, + ) + + +class APIKeysResourceWithRawResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = api_keys + + self.list = to_raw_response_wrapper( + api_keys.list, + ) + + @cached_property + def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: + return api_keys.APIKeysResourceWithRawResponse(self._api_keys.api_keys) + + +class AsyncAPIKeysResourceWithRawResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys + + self.list = async_to_raw_response_wrapper( + api_keys.list, + ) + + @cached_property + def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse: + return api_keys.AsyncAPIKeysResourceWithRawResponse(self._api_keys.api_keys) + + +class APIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = 
api_keys + + self.list = to_streamed_response_wrapper( + api_keys.list, + ) + + @cached_property + def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: + return api_keys.APIKeysResourceWithStreamingResponse(self._api_keys.api_keys) + + +class AsyncAPIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys + + self.list = async_to_streamed_response_wrapper( + api_keys.list, + ) + + @cached_property + def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: + return api_keys.AsyncAPIKeysResourceWithStreamingResponse(self._api_keys.api_keys) diff --git a/src/gradientai/resources/api_keys/api_keys_.py b/src/gradientai/resources/api_keys/api_keys_.py new file mode 100644 index 00000000..969bcfb9 --- /dev/null +++ b/src/gradientai/resources/api_keys/api_keys_.py @@ -0,0 +1,529 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.api_keys import api_key_list_params, api_key_create_params, api_key_update_params +from ...types.api_keys.api_key_list_response import APIKeyListResponse +from ...types.api_keys.api_key_create_response import APIKeyCreateResponse +from ...types.api_keys.api_key_delete_response import APIKeyDeleteResponse +from ...types.api_keys.api_key_update_response import APIKeyUpdateResponse +from ...types.api_keys.api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse + +__all__ = ["APIKeysResource", 
"AsyncAPIKeysResource"] + + +class APIKeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> APIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return APIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return APIKeysResourceWithStreamingResponse(self) + + def create( + self, + *, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyCreateResponse: + """ + To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/models/api_keys", + body=maybe_transform({"name": name}, api_key_create_params.APIKeyCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyCreateResponse, + ) + + def update( + self, + path_api_key_uuid: str, + *, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateResponse: + """ + To update a model API key, send a PUT request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return self._put( + f"/v2/gen-ai/models/api_keys/{path_api_key_uuid}", + body=maybe_transform( + { + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + api_key_update_params.APIKeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyListResponse: + """ + To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/models/api_keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + api_key_list_params.APIKeyListParams, + ), + ), + cast_to=APIKeyListResponse, + ) + + def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyDeleteResponse: + """ + To delete an API key for a model, send a DELETE request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._delete( + f"/v2/gen-ai/models/api_keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyDeleteResponse, + ) + + def update_regenerate( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateRegenerateResponse: + """ + To regenerate a model API key, send a PUT request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._put( + f"/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateRegenerateResponse, + ) + + +class AsyncAPIKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncAPIKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncAPIKeysResourceWithStreamingResponse(self) + + async def create( + self, + *, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyCreateResponse: + """ + To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/models/api_keys", + body=await async_maybe_transform({"name": name}, api_key_create_params.APIKeyCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyCreateResponse, + ) + + async def update( + self, + path_api_key_uuid: str, + *, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateResponse: + """ + To update a model API key, send a PUT request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return await self._put( + f"/v2/gen-ai/models/api_keys/{path_api_key_uuid}", + body=await async_maybe_transform( + { + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + api_key_update_params.APIKeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyListResponse: + """ + To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/models/api_keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + api_key_list_params.APIKeyListParams, + ), + ), + cast_to=APIKeyListResponse, + ) + + async def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyDeleteResponse: + """ + To delete an API key for a model, send a DELETE request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._delete( + f"/v2/gen-ai/models/api_keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyDeleteResponse, + ) + + async def update_regenerate( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> APIKeyUpdateRegenerateResponse: + """ + To regenerate a model API key, send a PUT request to + `/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._put( + f"/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=APIKeyUpdateRegenerateResponse, + ) + + +class APIKeysResourceWithRawResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = api_keys + + self.create = to_raw_response_wrapper( + api_keys.create, + ) + self.update = to_raw_response_wrapper( + api_keys.update, + ) + self.list = to_raw_response_wrapper( + api_keys.list, + ) + self.delete = to_raw_response_wrapper( + api_keys.delete, + ) + self.update_regenerate = to_raw_response_wrapper( + api_keys.update_regenerate, + ) + + +class AsyncAPIKeysResourceWithRawResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys + + self.create = async_to_raw_response_wrapper( + api_keys.create, + ) + self.update = async_to_raw_response_wrapper( + api_keys.update, + ) + self.list = async_to_raw_response_wrapper( + api_keys.list, + ) + self.delete = async_to_raw_response_wrapper( + api_keys.delete, + ) + self.update_regenerate = async_to_raw_response_wrapper( + api_keys.update_regenerate, + ) + + +class APIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: APIKeysResource) -> None: + self._api_keys = api_keys + + self.create = to_streamed_response_wrapper( + api_keys.create, + ) + self.update = to_streamed_response_wrapper( + api_keys.update, + ) + self.list = to_streamed_response_wrapper( + api_keys.list, + ) + self.delete = 
to_streamed_response_wrapper( + api_keys.delete, + ) + self.update_regenerate = to_streamed_response_wrapper( + api_keys.update_regenerate, + ) + + +class AsyncAPIKeysResourceWithStreamingResponse: + def __init__(self, api_keys: AsyncAPIKeysResource) -> None: + self._api_keys = api_keys + + self.create = async_to_streamed_response_wrapper( + api_keys.create, + ) + self.update = async_to_streamed_response_wrapper( + api_keys.update, + ) + self.list = async_to_streamed_response_wrapper( + api_keys.list, + ) + self.delete = async_to_streamed_response_wrapper( + api_keys.delete, + ) + self.update_regenerate = async_to_streamed_response_wrapper( + api_keys.update_regenerate, + ) diff --git a/src/gradientai/resources/auth/__init__.py b/src/gradientai/resources/auth/__init__.py new file mode 100644 index 00000000..7c844a98 --- /dev/null +++ b/src/gradientai/resources/auth/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .auth import ( + AuthResource, + AsyncAuthResource, + AuthResourceWithRawResponse, + AsyncAuthResourceWithRawResponse, + AuthResourceWithStreamingResponse, + AsyncAuthResourceWithStreamingResponse, +) +from .agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, +) + +__all__ = [ + "AgentsResource", + "AsyncAgentsResource", + "AgentsResourceWithRawResponse", + "AsyncAgentsResourceWithRawResponse", + "AgentsResourceWithStreamingResponse", + "AsyncAgentsResourceWithStreamingResponse", + "AuthResource", + "AsyncAuthResource", + "AuthResourceWithRawResponse", + "AsyncAuthResourceWithRawResponse", + "AuthResourceWithStreamingResponse", + "AsyncAuthResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/auth/agents/__init__.py b/src/gradientai/resources/auth/agents/__init__.py new file mode 100644 index 
00000000..2972198f --- /dev/null +++ b/src/gradientai/resources/auth/agents/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .token import ( + TokenResource, + AsyncTokenResource, + TokenResourceWithRawResponse, + AsyncTokenResourceWithRawResponse, + TokenResourceWithStreamingResponse, + AsyncTokenResourceWithStreamingResponse, +) +from .agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, +) + +__all__ = [ + "TokenResource", + "AsyncTokenResource", + "TokenResourceWithRawResponse", + "AsyncTokenResourceWithRawResponse", + "TokenResourceWithStreamingResponse", + "AsyncTokenResourceWithStreamingResponse", + "AgentsResource", + "AsyncAgentsResource", + "AgentsResourceWithRawResponse", + "AsyncAgentsResourceWithRawResponse", + "AgentsResourceWithStreamingResponse", + "AsyncAgentsResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/auth/agents/agents.py b/src/gradientai/resources/auth/agents/agents.py new file mode 100644 index 00000000..52426560 --- /dev/null +++ b/src/gradientai/resources/auth/agents/agents.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .token import ( + TokenResource, + AsyncTokenResource, + TokenResourceWithRawResponse, + AsyncTokenResourceWithRawResponse, + TokenResourceWithStreamingResponse, + AsyncTokenResourceWithStreamingResponse, +) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["AgentsResource", "AsyncAgentsResource"] + + +class AgentsResource(SyncAPIResource): + @cached_property + def token(self) -> TokenResource: + return TokenResource(self._client) + + @cached_property + def with_raw_response(self) -> AgentsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AgentsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AgentsResourceWithStreamingResponse(self) + + +class AsyncAgentsResource(AsyncAPIResource): + @cached_property + def token(self) -> AsyncTokenResource: + return AsyncTokenResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncAgentsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncAgentsResourceWithStreamingResponse(self) + + +class AgentsResourceWithRawResponse: + def __init__(self, agents: AgentsResource) -> None: + self._agents = agents + + @cached_property + def token(self) -> TokenResourceWithRawResponse: + return TokenResourceWithRawResponse(self._agents.token) + + +class AsyncAgentsResourceWithRawResponse: + def __init__(self, agents: AsyncAgentsResource) -> None: + self._agents = agents + + @cached_property + def token(self) -> AsyncTokenResourceWithRawResponse: + return AsyncTokenResourceWithRawResponse(self._agents.token) + + +class AgentsResourceWithStreamingResponse: + def __init__(self, agents: AgentsResource) -> None: + self._agents = agents + + @cached_property + def token(self) -> TokenResourceWithStreamingResponse: + return TokenResourceWithStreamingResponse(self._agents.token) + + +class AsyncAgentsResourceWithStreamingResponse: + def __init__(self, agents: AsyncAgentsResource) -> None: + self._agents = agents + + @cached_property + def token(self) -> AsyncTokenResourceWithStreamingResponse: + return AsyncTokenResourceWithStreamingResponse(self._agents.token) diff --git a/src/gradientai/resources/auth/agents/token.py b/src/gradientai/resources/auth/agents/token.py new file mode 100644 index 00000000..26de7c06 --- /dev/null +++ b/src/gradientai/resources/auth/agents/token.py @@ -0,0 +1,173 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
# --- src/gradientai/resources/auth/agents/token.py (new file) ---
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

import httpx

from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import (
    to_raw_response_wrapper,
    to_streamed_response_wrapper,
    async_to_raw_response_wrapper,
    async_to_streamed_response_wrapper,
)
from ...._base_client import make_request_options
from ....types.auth.agents import token_create_params
from ....types.auth.agents.token_create_response import TokenCreateResponse

__all__ = ["TokenResource", "AsyncTokenResource"]


class TokenResource(SyncAPIResource):
    """Synchronous resource for issuing agent tokens."""

    @cached_property
    def with_raw_response(self) -> TokenResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
        """
        return TokenResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> TokenResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
        """
        return TokenResourceWithStreamingResponse(self)

    # NOTE(review): the endpoint's `agent_uuid` appears in both the URL path
    # and the request body, hence the `path_` / `body_` prefixes — presumably
    # the generator disambiguates the duplicated field and
    # `token_create_params.TokenCreateParams` maps `body_agent_uuid` back to
    # the wire name; confirm against the OpenAPI spec.
    def create(
        self,
        path_agent_uuid: str,
        *,
        body_agent_uuid: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> TokenCreateResponse:
        """
        To issue an agent token, send a POST request to
        `/v2/gen-ai/auth/agents/{agent_uuid}/token`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # An empty path segment would produce a malformed URL, so fail fast.
        if not path_agent_uuid:
            raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
        return self._post(
            f"/v2/gen-ai/auth/agents/{path_agent_uuid}/token",
            body=maybe_transform({"body_agent_uuid": body_agent_uuid}, token_create_params.TokenCreateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=TokenCreateResponse,
        )


class AsyncTokenResource(AsyncAPIResource):
    """Asynchronous counterpart of :class:`TokenResource`."""

    @cached_property
    def with_raw_response(self) -> AsyncTokenResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncTokenResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncTokenResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
        """
        return AsyncTokenResourceWithStreamingResponse(self)

    async def create(
        self,
        path_agent_uuid: str,
        *,
        body_agent_uuid: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> TokenCreateResponse:
        """
        To issue an agent token, send a POST request to
        `/v2/gen-ai/auth/agents/{agent_uuid}/token`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not path_agent_uuid:
            raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}")
        return await self._post(
            f"/v2/gen-ai/auth/agents/{path_agent_uuid}/token",
            body=await async_maybe_transform(
                {"body_agent_uuid": body_agent_uuid}, token_create_params.TokenCreateParams
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=TokenCreateResponse,
        )


class TokenResourceWithRawResponse:
    """Raw-response view: wraps each method so it returns the raw HTTP response."""

    def __init__(self, token: TokenResource) -> None:
        self._token = token

        self.create = to_raw_response_wrapper(
            token.create,
        )


class AsyncTokenResourceWithRawResponse:
    """Async raw-response view over :class:`AsyncTokenResource`."""

    def __init__(self, token: AsyncTokenResource) -> None:
        self._token = token

        self.create = async_to_raw_response_wrapper(
            token.create,
        )


class TokenResourceWithStreamingResponse:
    """Streaming-response view: wraps methods so the body is not read eagerly."""

    def __init__(self, token: TokenResource) -> None:
        self._token = token

        self.create = to_streamed_response_wrapper(
            token.create,
        )
class AsyncTokenResourceWithStreamingResponse:
    """Async streaming-response view over :class:`AsyncTokenResource`."""

    def __init__(self, token: AsyncTokenResource) -> None:
        self._token = token

        self.create = async_to_streamed_response_wrapper(
            token.create,
        )


# --- src/gradientai/resources/auth/auth.py (new file) ---
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from .agents.agents import (
    AgentsResource,
    AsyncAgentsResource,
    AgentsResourceWithRawResponse,
    AsyncAgentsResourceWithRawResponse,
    AgentsResourceWithStreamingResponse,
    AsyncAgentsResourceWithStreamingResponse,
)

__all__ = ["AuthResource", "AsyncAuthResource"]


class AuthResource(SyncAPIResource):
    """Synchronous accessor for the `auth` API namespace (container resource)."""

    @cached_property
    def agents(self) -> AgentsResource:
        # Child resource sharing this resource's underlying client.
        return AgentsResource(self._client)

    @cached_property
    def with_raw_response(self) -> AuthResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
        """
        return AuthResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AuthResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
        """
        return AuthResourceWithStreamingResponse(self)


class AsyncAuthResource(AsyncAPIResource):
    """Asynchronous counterpart of :class:`AuthResource`."""

    @cached_property
    def agents(self) -> AsyncAgentsResource:
        return AsyncAgentsResource(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncAuthResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncAuthResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncAuthResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
        """
        return AsyncAuthResourceWithStreamingResponse(self)


class AuthResourceWithRawResponse:
    """Raw-response view over :class:`AuthResource`; mirrors its sub-resources."""

    def __init__(self, auth: AuthResource) -> None:
        self._auth = auth

    @cached_property
    def agents(self) -> AgentsResourceWithRawResponse:
        return AgentsResourceWithRawResponse(self._auth.agents)


class AsyncAuthResourceWithRawResponse:
    """Async raw-response view over :class:`AsyncAuthResource`."""

    def __init__(self, auth: AsyncAuthResource) -> None:
        self._auth = auth

    @cached_property
    def agents(self) -> AsyncAgentsResourceWithRawResponse:
        return AsyncAgentsResourceWithRawResponse(self._auth.agents)


class AuthResourceWithStreamingResponse:
    """Streaming-response view over :class:`AuthResource`."""

    def __init__(self, auth: AuthResource) -> None:
        self._auth = auth

    @cached_property
    def agents(self) -> AgentsResourceWithStreamingResponse:
        return AgentsResourceWithStreamingResponse(self._auth.agents)


class AsyncAuthResourceWithStreamingResponse:
    """Async streaming-response view over :class:`AsyncAuthResource`."""

    def __init__(self, auth: AsyncAuthResource) -> None:
        self._auth = auth

    @cached_property
    def agents(self) -> AsyncAgentsResourceWithStreamingResponse:
        return AsyncAgentsResourceWithStreamingResponse(self._auth.agents)
# --- src/gradientai/resources/knowledge_bases/data_sources.py (modified) ---
# Hunk context not reproduced here: this change also adds
#   from ...types.knowledge_bases.data_source_delete_response import DataSourceDeleteResponse
# to the module imports, and registers `delete` in the four existing
# *WithRawResponse / *WithStreamingResponse wrapper classes via
#   self.delete = to_raw_response_wrapper(data_sources.delete) (etc.).
# The enclosing classes (DataSourcesResource / AsyncDataSourcesResource)
# already exist in the file; only the two methods below are added.

    # Added to DataSourcesResource (sync):
    def delete(
        self,
        data_source_uuid: str,
        *,
        knowledge_base_uuid: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> DataSourceDeleteResponse:
        """
        To delete a data source from a knowledge base, send a DELETE request to
        `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Both identifiers are URL path segments; empty values would build a
        # malformed URL, so validate each before issuing the request.
        if not knowledge_base_uuid:
            raise ValueError(
                f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
            )
        if not data_source_uuid:
            raise ValueError(f"Expected a non-empty value for `data_source_uuid` but received {data_source_uuid!r}")
        return self._delete(
            f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=DataSourceDeleteResponse,
        )

    # Added to AsyncDataSourcesResource (async twin of the method above):
    async def delete(
        self,
        data_source_uuid: str,
        *,
        knowledge_base_uuid: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> DataSourceDeleteResponse:
        """
        To delete a data source from a knowledge base, send a DELETE request to
        `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not knowledge_base_uuid:
            raise ValueError(
                f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
            )
        if not data_source_uuid:
            raise ValueError(f"Expected a non-empty value for `data_source_uuid` but received {data_source_uuid!r}")
        return await self._delete(
            f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=DataSourceDeleteResponse,
        )
# --- src/gradientai/resources/knowledge_bases/knowledge_bases.py (modified, sync half) ---
# Hunk context not reproduced here: the module imports gain
# `knowledge_base_update_params` plus the Delete/Update/Retrieve response types
# used below. The methods below are added to the existing KnowledgeBasesResource
# class, between its `create` and `list` methods.
# Docstring typos fixed versus the incoming patch: "retrive" -> "retrieve",
# "optiona." -> "optional." (wording otherwise unchanged).

    def retrieve(
        self,
        uuid: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KnowledgeBaseRetrieveResponse:
        """
        To retrieve information about an existing knowledge base, send a GET request to
        `/v2/gen-ai/knowledge_bases/{uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # `uuid` is interpolated into the URL path; reject empty values early.
        if not uuid:
            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
        return self._get(
            f"/v2/gen-ai/knowledge_bases/{uuid}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KnowledgeBaseRetrieveResponse,
        )

    # NOTE(review): `path_uuid` / `body_uuid` — the spec has `uuid` in both the
    # URL path and the PUT body, so the generator disambiguates; presumably
    # KnowledgeBaseUpdateParams maps `body_uuid` back to the wire name `uuid`.
    def update(
        self,
        path_uuid: str,
        *,
        database_id: str | NotGiven = NOT_GIVEN,
        embedding_model_uuid: str | NotGiven = NOT_GIVEN,
        name: str | NotGiven = NOT_GIVEN,
        project_id: str | NotGiven = NOT_GIVEN,
        tags: List[str] | NotGiven = NOT_GIVEN,
        body_uuid: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KnowledgeBaseUpdateResponse:
        """
        To update a knowledge base, send a PUT request to
        `/v2/gen-ai/knowledge_bases/{uuid}`.

        Args:
          database_id: the id of the DigitalOcean database this knowledge base will use, optional.

          embedding_model_uuid: Identifier for the foundation model.

          tags: Tags to organize your knowledge base.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not path_uuid:
            raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
        return self._put(
            f"/v2/gen-ai/knowledge_bases/{path_uuid}",
            body=maybe_transform(
                {
                    "database_id": database_id,
                    "embedding_model_uuid": embedding_model_uuid,
                    "name": name,
                    "project_id": project_id,
                    "tags": tags,
                    "body_uuid": body_uuid,
                },
                knowledge_base_update_params.KnowledgeBaseUpdateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KnowledgeBaseUpdateResponse,
        )

    def delete(
        self,
        uuid: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KnowledgeBaseDeleteResponse:
        """
        To delete a knowledge base, send a DELETE request to
        `/v2/gen-ai/knowledge_bases/{uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not uuid:
            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
        return self._delete(
            f"/v2/gen-ai/knowledge_bases/{uuid}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KnowledgeBaseDeleteResponse,
        )
# --- src/gradientai/resources/knowledge_bases/knowledge_bases.py (modified, async half) ---
# Async twins of the sync retrieve/update/delete methods, added to the existing
# AsyncKnowledgeBasesResource class. Docstring typos fixed versus the incoming
# patch: "retrive" -> "retrieve", "optiona." -> "optional.".

    async def retrieve(
        self,
        uuid: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KnowledgeBaseRetrieveResponse:
        """
        To retrieve information about an existing knowledge base, send a GET request to
        `/v2/gen-ai/knowledge_bases/{uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not uuid:
            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
        return await self._get(
            f"/v2/gen-ai/knowledge_bases/{uuid}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KnowledgeBaseRetrieveResponse,
        )

    async def update(
        self,
        path_uuid: str,
        *,
        database_id: str | NotGiven = NOT_GIVEN,
        embedding_model_uuid: str | NotGiven = NOT_GIVEN,
        name: str | NotGiven = NOT_GIVEN,
        project_id: str | NotGiven = NOT_GIVEN,
        tags: List[str] | NotGiven = NOT_GIVEN,
        body_uuid: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KnowledgeBaseUpdateResponse:
        """
        To update a knowledge base, send a PUT request to
        `/v2/gen-ai/knowledge_bases/{uuid}`.

        Args:
          database_id: the id of the DigitalOcean database this knowledge base will use, optional.

          embedding_model_uuid: Identifier for the foundation model.

          tags: Tags to organize your knowledge base.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not path_uuid:
            raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
        return await self._put(
            f"/v2/gen-ai/knowledge_bases/{path_uuid}",
            body=await async_maybe_transform(
                {
                    "database_id": database_id,
                    "embedding_model_uuid": embedding_model_uuid,
                    "name": name,
                    "project_id": project_id,
                    "tags": tags,
                    "body_uuid": body_uuid,
                },
                knowledge_base_update_params.KnowledgeBaseUpdateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KnowledgeBaseUpdateResponse,
        )

    async def delete(
        self,
        uuid: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KnowledgeBaseDeleteResponse:
        """
        To delete a knowledge base, send a DELETE request to
        `/v2/gen-ai/knowledge_bases/{uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not uuid:
            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
        return await self._delete(
            f"/v2/gen-ai/knowledge_bases/{uuid}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KnowledgeBaseDeleteResponse,
        )

# Hunk context not reproduced here: the four existing wrapper classes
# (KnowledgeBasesResourceWithRawResponse, AsyncKnowledgeBasesResourceWithRawResponse,
# KnowledgeBasesResourceWithStreamingResponse,
# AsyncKnowledgeBasesResourceWithStreamingResponse) each register the three new
# methods in their __init__, e.g.
#   self.retrieve = to_raw_response_wrapper(knowledge_bases.retrieve)
#   self.update   = to_raw_response_wrapper(knowledge_bases.update)
#   self.delete   = to_raw_response_wrapper(knowledge_bases.delete)
# using the sync/async raw/streamed wrapper variant matching each class.
# --- src/gradientai/resources/providers/__init__.py (new file) ---
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
# Re-exports the provider sub-resources so `from .providers import ...` works
# at the package level; `__all__` defines the package's public API explicitly.

from .openai import (
    OpenAIResource,
    AsyncOpenAIResource,
    OpenAIResourceWithRawResponse,
    AsyncOpenAIResourceWithRawResponse,
    OpenAIResourceWithStreamingResponse,
    AsyncOpenAIResourceWithStreamingResponse,
)
from .anthropic import (
    AnthropicResource,
    AsyncAnthropicResource,
    AnthropicResourceWithRawResponse,
    AsyncAnthropicResourceWithRawResponse,
    AnthropicResourceWithStreamingResponse,
    AsyncAnthropicResourceWithStreamingResponse,
)
from .providers import (
    ProvidersResource,
    AsyncProvidersResource,
    ProvidersResourceWithRawResponse,
    AsyncProvidersResourceWithRawResponse,
    ProvidersResourceWithStreamingResponse,
    AsyncProvidersResourceWithStreamingResponse,
)

__all__ = [
    "AnthropicResource",
    "AsyncAnthropicResource",
    "AnthropicResourceWithRawResponse",
    "AsyncAnthropicResourceWithRawResponse",
    "AnthropicResourceWithStreamingResponse",
    "AsyncAnthropicResourceWithStreamingResponse",
    "OpenAIResource",
    "AsyncOpenAIResource",
    "OpenAIResourceWithRawResponse",
    "AsyncOpenAIResourceWithRawResponse",
    "OpenAIResourceWithStreamingResponse",
    "AsyncOpenAIResourceWithStreamingResponse",
    "ProvidersResource",
    "AsyncProvidersResource",
    "ProvidersResourceWithRawResponse",
    "AsyncProvidersResourceWithRawResponse",
    "ProvidersResourceWithStreamingResponse",
    "AsyncProvidersResourceWithStreamingResponse",
]


# --- src/gradientai/resources/providers/anthropic/__init__.py (new file) ---
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from .keys import (
    KeysResource,
    AsyncKeysResource,
    KeysResourceWithRawResponse,
    AsyncKeysResourceWithRawResponse,
    KeysResourceWithStreamingResponse,
    AsyncKeysResourceWithStreamingResponse,
)
from .anthropic import (
    AnthropicResource,
    AsyncAnthropicResource,
    AnthropicResourceWithRawResponse,
    AsyncAnthropicResourceWithRawResponse,
    AnthropicResourceWithStreamingResponse,
    AsyncAnthropicResourceWithStreamingResponse,
)

__all__ = [
    "KeysResource",
    "AsyncKeysResource",
    "KeysResourceWithRawResponse",
    "AsyncKeysResourceWithRawResponse",
    "KeysResourceWithStreamingResponse",
    "AsyncKeysResourceWithStreamingResponse",
    "AnthropicResource",
    "AsyncAnthropicResource",
    "AnthropicResourceWithRawResponse",
    "AsyncAnthropicResourceWithRawResponse",
    "AnthropicResourceWithStreamingResponse",
    "AsyncAnthropicResourceWithStreamingResponse",
]
# --- src/gradientai/resources/providers/anthropic/anthropic.py (new file) ---
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from .keys import (
    KeysResource,
    AsyncKeysResource,
    KeysResourceWithRawResponse,
    AsyncKeysResourceWithRawResponse,
    KeysResourceWithStreamingResponse,
    AsyncKeysResourceWithStreamingResponse,
)
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource

__all__ = ["AnthropicResource", "AsyncAnthropicResource"]


class AnthropicResource(SyncAPIResource):
    """Synchronous accessor for the `providers/anthropic` API namespace."""

    @cached_property
    def keys(self) -> KeysResource:
        # Child resource sharing this resource's underlying client.
        return KeysResource(self._client)

    @cached_property
    def with_raw_response(self) -> AnthropicResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
        """
        return AnthropicResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
        """
        return AnthropicResourceWithStreamingResponse(self)


class AsyncAnthropicResource(AsyncAPIResource):
    """Asynchronous counterpart of :class:`AnthropicResource`."""

    @cached_property
    def keys(self) -> AsyncKeysResource:
        return AsyncKeysResource(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncAnthropicResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
        """
        return AsyncAnthropicResourceWithStreamingResponse(self)


class AnthropicResourceWithRawResponse:
    """Raw-response view over :class:`AnthropicResource`; mirrors its sub-resources."""

    def __init__(self, anthropic: AnthropicResource) -> None:
        self._anthropic = anthropic

    @cached_property
    def keys(self) -> KeysResourceWithRawResponse:
        return KeysResourceWithRawResponse(self._anthropic.keys)


class AsyncAnthropicResourceWithRawResponse:
    """Async raw-response view over :class:`AsyncAnthropicResource`."""

    def __init__(self, anthropic: AsyncAnthropicResource) -> None:
        self._anthropic = anthropic

    @cached_property
    def keys(self) -> AsyncKeysResourceWithRawResponse:
        return AsyncKeysResourceWithRawResponse(self._anthropic.keys)


class AnthropicResourceWithStreamingResponse:
    """Streaming-response view over :class:`AnthropicResource`."""

    def __init__(self, anthropic: AnthropicResource) -> None:
        self._anthropic = anthropic

    @cached_property
    def keys(self) -> KeysResourceWithStreamingResponse:
        return KeysResourceWithStreamingResponse(self._anthropic.keys)


class AsyncAnthropicResourceWithStreamingResponse:
    """Async streaming-response view over :class:`AsyncAnthropicResource`."""

    def __init__(self, anthropic: AsyncAnthropicResource) -> None:
        self._anthropic = anthropic

    @cached_property
    def keys(self) -> AsyncKeysResourceWithStreamingResponse:
        return AsyncKeysResourceWithStreamingResponse(self._anthropic.keys)
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

import httpx

from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import (
    to_raw_response_wrapper,
    to_streamed_response_wrapper,
    async_to_raw_response_wrapper,
    async_to_streamed_response_wrapper,
)
from ...._base_client import make_request_options
from ....types.providers.anthropic import key_list_params, key_create_params, key_update_params, key_list_agents_params
from ....types.providers.anthropic.key_list_response import KeyListResponse
from ....types.providers.anthropic.key_create_response import KeyCreateResponse
from ....types.providers.anthropic.key_delete_response import KeyDeleteResponse
from ....types.providers.anthropic.key_update_response import KeyUpdateResponse
from ....types.providers.anthropic.key_retrieve_response import KeyRetrieveResponse
from ....types.providers.anthropic.key_list_agents_response import KeyListAgentsResponse

__all__ = ["KeysResource", "AsyncKeysResource"]

# Methods mirrored onto the raw/streaming wrapper classes at module bottom.
_WRAPPED_METHODS = ("create", "retrieve", "update", "list", "delete", "list_agents")


class KeysResource(SyncAPIResource):
    """Synchronous client for the `/v2/gen-ai/anthropic/keys` endpoints."""

    @cached_property
    def with_raw_response(self) -> KeysResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
        """
        return KeysResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> KeysResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
        """
        return KeysResourceWithStreamingResponse(self)

    def create(
        self,
        *,
        api_key: str | NotGiven = NOT_GIVEN,
        name: str | NotGiven = NOT_GIVEN,
        # The extra_* arguments are forwarded to the underlying HTTP request and
        # take precedence over values configured on the client.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyCreateResponse:
        """Create an Anthropic API key via `POST /v2/gen-ai/anthropic/keys`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        payload = maybe_transform({"api_key": api_key, "name": name}, key_create_params.KeyCreateParams)
        request_options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return self._post(
            "/v2/gen-ai/anthropic/keys", body=payload, options=request_options, cast_to=KeyCreateResponse
        )

    def retrieve(
        self,
        api_key_uuid: str,
        *,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyRetrieveResponse:
        """Fetch one Anthropic API key via `GET /v2/gen-ai/anthropic/keys/{api_key_uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not api_key_uuid:
            raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
        request_options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return self._get(
            f"/v2/gen-ai/anthropic/keys/{api_key_uuid}", options=request_options, cast_to=KeyRetrieveResponse
        )

    def update(
        self,
        path_api_key_uuid: str,
        *,
        api_key: str | NotGiven = NOT_GIVEN,
        body_api_key_uuid: str | NotGiven = NOT_GIVEN,
        name: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyUpdateResponse:
        """Update an Anthropic API key via `PUT /v2/gen-ai/anthropic/keys/{api_key_uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not path_api_key_uuid:
            raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
        # `body_api_key_uuid` is renamed by the transform layer so the path and
        # body parameters with the same wire name can coexist in the signature.
        payload = maybe_transform(
            {"api_key": api_key, "body_api_key_uuid": body_api_key_uuid, "name": name},
            key_update_params.KeyUpdateParams,
        )
        request_options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return self._put(
            f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}",
            body=payload,
            options=request_options,
            cast_to=KeyUpdateResponse,
        )

    def list(
        self,
        *,
        page: int | NotGiven = NOT_GIVEN,
        per_page: int | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyListResponse:
        """List all Anthropic API keys via `GET /v2/gen-ai/anthropic/keys`.

        Args:
          page: page number.

          per_page: items per page.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        query = maybe_transform({"page": page, "per_page": per_page}, key_list_params.KeyListParams)
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            query=query,
        )
        return self._get("/v2/gen-ai/anthropic/keys", options=request_options, cast_to=KeyListResponse)

    def delete(
        self,
        api_key_uuid: str,
        *,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyDeleteResponse:
        """Delete an Anthropic API key via `DELETE /v2/gen-ai/anthropic/keys/{api_key_uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not api_key_uuid:
            raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
        request_options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return self._delete(
            f"/v2/gen-ai/anthropic/keys/{api_key_uuid}", options=request_options, cast_to=KeyDeleteResponse
        )

    def list_agents(
        self,
        uuid: str,
        *,
        page: int | NotGiven = NOT_GIVEN,
        per_page: int | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyListAgentsResponse:
        """List Agents by Anthropic Key (`GET /v2/gen-ai/anthropic/keys/{uuid}/agents`).

        Args:
          page: page number.

          per_page: items per page.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not uuid:
            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
        query = maybe_transform({"page": page, "per_page": per_page}, key_list_agents_params.KeyListAgentsParams)
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            query=query,
        )
        return self._get(
            f"/v2/gen-ai/anthropic/keys/{uuid}/agents", options=request_options, cast_to=KeyListAgentsResponse
        )


class AsyncKeysResource(AsyncAPIResource):
    """Asynchronous client for the `/v2/gen-ai/anthropic/keys` endpoints."""

    @cached_property
    def with_raw_response(self) -> AsyncKeysResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncKeysResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
        """
        return AsyncKeysResourceWithStreamingResponse(self)

    async def create(
        self,
        *,
        api_key: str | NotGiven = NOT_GIVEN,
        name: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyCreateResponse:
        """Create an Anthropic API key via `POST /v2/gen-ai/anthropic/keys`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        payload = await async_maybe_transform({"api_key": api_key, "name": name}, key_create_params.KeyCreateParams)
        request_options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return await self._post(
            "/v2/gen-ai/anthropic/keys", body=payload, options=request_options, cast_to=KeyCreateResponse
        )

    async def retrieve(
        self,
        api_key_uuid: str,
        *,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyRetrieveResponse:
        """Fetch one Anthropic API key via `GET /v2/gen-ai/anthropic/keys/{api_key_uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not api_key_uuid:
            raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
        request_options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return await self._get(
            f"/v2/gen-ai/anthropic/keys/{api_key_uuid}", options=request_options, cast_to=KeyRetrieveResponse
        )

    async def update(
        self,
        path_api_key_uuid: str,
        *,
        api_key: str | NotGiven = NOT_GIVEN,
        body_api_key_uuid: str | NotGiven = NOT_GIVEN,
        name: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyUpdateResponse:
        """Update an Anthropic API key via `PUT /v2/gen-ai/anthropic/keys/{api_key_uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not path_api_key_uuid:
            raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
        payload = await async_maybe_transform(
            {"api_key": api_key, "body_api_key_uuid": body_api_key_uuid, "name": name},
            key_update_params.KeyUpdateParams,
        )
        request_options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return await self._put(
            f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}",
            body=payload,
            options=request_options,
            cast_to=KeyUpdateResponse,
        )

    async def list(
        self,
        *,
        page: int | NotGiven = NOT_GIVEN,
        per_page: int | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyListResponse:
        """List all Anthropic API keys via `GET /v2/gen-ai/anthropic/keys`.

        Args:
          page: page number.

          per_page: items per page.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        query = await async_maybe_transform({"page": page, "per_page": per_page}, key_list_params.KeyListParams)
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            query=query,
        )
        return await self._get("/v2/gen-ai/anthropic/keys", options=request_options, cast_to=KeyListResponse)

    async def delete(
        self,
        api_key_uuid: str,
        *,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyDeleteResponse:
        """Delete an Anthropic API key via `DELETE /v2/gen-ai/anthropic/keys/{api_key_uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not api_key_uuid:
            raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
        request_options = make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        )
        return await self._delete(
            f"/v2/gen-ai/anthropic/keys/{api_key_uuid}", options=request_options, cast_to=KeyDeleteResponse
        )

    async def list_agents(
        self,
        uuid: str,
        *,
        page: int | NotGiven = NOT_GIVEN,
        per_page: int | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyListAgentsResponse:
        """List Agents by Anthropic Key (`GET /v2/gen-ai/anthropic/keys/{uuid}/agents`).

        Args:
          page: page number.

          per_page: items per page.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not uuid:
            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
        query = await async_maybe_transform(
            {"page": page, "per_page": per_page}, key_list_agents_params.KeyListAgentsParams
        )
        request_options = make_request_options(
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            query=query,
        )
        return await self._get(
            f"/v2/gen-ai/anthropic/keys/{uuid}/agents", options=request_options, cast_to=KeyListAgentsResponse
        )


class KeysResourceWithRawResponse:
    """Wraps every :class:`KeysResource` method to return the raw response."""

    def __init__(self, keys: KeysResource) -> None:
        self._keys = keys
        # Same attributes the hand-unrolled form would set, built in a loop.
        for _method in _WRAPPED_METHODS:
            setattr(self, _method, to_raw_response_wrapper(getattr(keys, _method)))


class AsyncKeysResourceWithRawResponse:
    """Wraps every :class:`AsyncKeysResource` method to return the raw response."""

    def __init__(self, keys: AsyncKeysResource) -> None:
        self._keys = keys
        for _method in _WRAPPED_METHODS:
            setattr(self, _method, async_to_raw_response_wrapper(getattr(keys, _method)))


class KeysResourceWithStreamingResponse:
    """Wraps every :class:`KeysResource` method to stream the response body."""

    def __init__(self, keys: KeysResource) -> None:
        self._keys = keys
        for _method in _WRAPPED_METHODS:
            setattr(self, _method, to_streamed_response_wrapper(getattr(keys, _method)))


class AsyncKeysResourceWithStreamingResponse:
    """Wraps every :class:`AsyncKeysResource` method to stream the response body."""

    def __init__(self, keys: AsyncKeysResource) -> None:
        self._keys = keys
        for _method in _WRAPPED_METHODS:
            setattr(self, _method, async_to_streamed_response_wrapper(getattr(keys, _method)))
+ +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from .openai import ( + OpenAIResource, + AsyncOpenAIResource, + OpenAIResourceWithRawResponse, + AsyncOpenAIResourceWithRawResponse, + OpenAIResourceWithStreamingResponse, + AsyncOpenAIResourceWithStreamingResponse, +) + +__all__ = [ + "KeysResource", + "AsyncKeysResource", + "KeysResourceWithRawResponse", + "AsyncKeysResourceWithRawResponse", + "KeysResourceWithStreamingResponse", + "AsyncKeysResourceWithStreamingResponse", + "OpenAIResource", + "AsyncOpenAIResource", + "OpenAIResourceWithRawResponse", + "AsyncOpenAIResourceWithRawResponse", + "OpenAIResourceWithStreamingResponse", + "AsyncOpenAIResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/providers/openai/keys.py b/src/gradientai/resources/providers/openai/keys.py new file mode 100644 index 00000000..44ac8508 --- /dev/null +++ b/src/gradientai/resources/providers/openai/keys.py @@ -0,0 +1,658 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.providers.openai import key_list_params, key_create_params, key_update_params, key_retrieve_agents_params +from ....types.providers.openai.key_list_response import KeyListResponse +from ....types.providers.openai.key_create_response import KeyCreateResponse +from ....types.providers.openai.key_delete_response import KeyDeleteResponse +from ....types.providers.openai.key_update_response import KeyUpdateResponse +from ....types.providers.openai.key_retrieve_response import KeyRetrieveResponse +from ....types.providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse + +__all__ = ["KeysResource", "AsyncKeysResource"] + + +class KeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> KeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return KeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> KeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return KeysResourceWithStreamingResponse(self) + + def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/openai/keys", + body=maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an OpenAI API key, send a GET request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._get( + f"/v2/gen-ai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an OpenAI API key, send a PUT request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return self._put( + f"/v2/gen-ai/openai/keys/{path_api_key_uuid}", + body=maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/openai/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an OpenAI API key, send a DELETE request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return self._delete( + f"/v2/gen-ai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + def retrieve_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveAgentsResponse: + """ + List Agents by OpenAI Key. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return self._get( + f"/v2/gen-ai/openai/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_retrieve_agents_params.KeyRetrieveAgentsParams, + ), + ), + cast_to=KeyRetrieveAgentsResponse, + ) + + +class AsyncKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncKeysResourceWithStreamingResponse(self) + + async def create( + self, + *, + api_key: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/openai/keys", + body=await async_maybe_transform( + { + "api_key": api_key, + "name": name, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + async def retrieve( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To retrieve details of an OpenAI API key, send a GET request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._get( + f"/v2/gen-ai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + async def update( + self, + path_api_key_uuid: str, + *, + api_key: str | NotGiven = NOT_GIVEN, + body_api_key_uuid: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update an OpenAI API key, send a PUT request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_api_key_uuid: + raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") + return await self._put( + f"/v2/gen-ai/openai/keys/{path_api_key_uuid}", + body=await async_maybe_transform( + { + "api_key": api_key, + "body_api_key_uuid": body_api_key_uuid, + "name": name, + }, + key_update_params.KeyUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/openai/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + async def delete( + self, + api_key_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyDeleteResponse: + """ + To delete an OpenAI API key, send a DELETE request to + `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not api_key_uuid: + raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") + return await self._delete( + f"/v2/gen-ai/openai/keys/{api_key_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyDeleteResponse, + ) + + async def retrieve_agents( + self, + uuid: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveAgentsResponse: + """ + List Agents by OpenAI Key. + + Args: + page: page number. + + per_page: items per page. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not uuid: + raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") + return await self._get( + f"/v2/gen-ai/openai/keys/{uuid}/agents", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_retrieve_agents_params.KeyRetrieveAgentsParams, + ), + ), + cast_to=KeyRetrieveAgentsResponse, + ) + + +class KeysResourceWithRawResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_raw_response_wrapper( + keys.create, + ) + self.retrieve = to_raw_response_wrapper( + keys.retrieve, + ) + self.update = to_raw_response_wrapper( + keys.update, + ) + self.list = to_raw_response_wrapper( + keys.list, + ) + self.delete = to_raw_response_wrapper( + keys.delete, + ) + self.retrieve_agents = to_raw_response_wrapper( + keys.retrieve_agents, + ) + + +class AsyncKeysResourceWithRawResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_raw_response_wrapper( + keys.create, + ) + self.retrieve = async_to_raw_response_wrapper( + keys.retrieve, + ) + self.update = async_to_raw_response_wrapper( + keys.update, + ) + self.list = async_to_raw_response_wrapper( + keys.list, + ) + self.delete = async_to_raw_response_wrapper( + keys.delete, + ) + self.retrieve_agents = async_to_raw_response_wrapper( + keys.retrieve_agents, + ) + + +class KeysResourceWithStreamingResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = 
to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = to_streamed_response_wrapper( + keys.update, + ) + self.list = to_streamed_response_wrapper( + keys.list, + ) + self.delete = to_streamed_response_wrapper( + keys.delete, + ) + self.retrieve_agents = to_streamed_response_wrapper( + keys.retrieve_agents, + ) + + +class AsyncKeysResourceWithStreamingResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + keys.update, + ) + self.list = async_to_streamed_response_wrapper( + keys.list, + ) + self.delete = async_to_streamed_response_wrapper( + keys.delete, + ) + self.retrieve_agents = async_to_streamed_response_wrapper( + keys.retrieve_agents, + ) diff --git a/src/gradientai/resources/providers/openai/openai.py b/src/gradientai/resources/providers/openai/openai.py new file mode 100644 index 00000000..b02dc2e1 --- /dev/null +++ b/src/gradientai/resources/providers/openai/openai.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["OpenAIResource", "AsyncOpenAIResource"] + + +class OpenAIResource(SyncAPIResource): + @cached_property + def keys(self) -> KeysResource: + return KeysResource(self._client) + + @cached_property + def with_raw_response(self) -> OpenAIResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return OpenAIResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return OpenAIResourceWithStreamingResponse(self) + + +class AsyncOpenAIResource(AsyncAPIResource): + @cached_property + def keys(self) -> AsyncKeysResource: + return AsyncKeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncOpenAIResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncOpenAIResourceWithStreamingResponse(self) + + +class OpenAIResourceWithRawResponse: + def __init__(self, openai: OpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> KeysResourceWithRawResponse: + return KeysResourceWithRawResponse(self._openai.keys) + + +class AsyncOpenAIResourceWithRawResponse: + def __init__(self, openai: AsyncOpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> AsyncKeysResourceWithRawResponse: + return AsyncKeysResourceWithRawResponse(self._openai.keys) + + +class OpenAIResourceWithStreamingResponse: + def __init__(self, openai: OpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> KeysResourceWithStreamingResponse: + return KeysResourceWithStreamingResponse(self._openai.keys) + + +class AsyncOpenAIResourceWithStreamingResponse: + def __init__(self, openai: AsyncOpenAIResource) -> None: + self._openai = openai + + @cached_property + def keys(self) -> AsyncKeysResourceWithStreamingResponse: + return AsyncKeysResourceWithStreamingResponse(self._openai.keys) diff --git a/src/gradientai/resources/providers/providers.py b/src/gradientai/resources/providers/providers.py new file mode 100644 index 00000000..ef942f73 --- /dev/null +++ b/src/gradientai/resources/providers/providers.py @@ -0,0 +1,134 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from .openai.openai import ( + OpenAIResource, + AsyncOpenAIResource, + OpenAIResourceWithRawResponse, + AsyncOpenAIResourceWithRawResponse, + OpenAIResourceWithStreamingResponse, + AsyncOpenAIResourceWithStreamingResponse, +) +from .anthropic.anthropic import ( + AnthropicResource, + AsyncAnthropicResource, + AnthropicResourceWithRawResponse, + AsyncAnthropicResourceWithRawResponse, + AnthropicResourceWithStreamingResponse, + AsyncAnthropicResourceWithStreamingResponse, +) + +__all__ = ["ProvidersResource", "AsyncProvidersResource"] + + +class ProvidersResource(SyncAPIResource): + @cached_property + def anthropic(self) -> AnthropicResource: + return AnthropicResource(self._client) + + @cached_property + def openai(self) -> OpenAIResource: + return OpenAIResource(self._client) + + @cached_property + def with_raw_response(self) -> ProvidersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ProvidersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ProvidersResourceWithStreamingResponse(self) + + +class AsyncProvidersResource(AsyncAPIResource): + @cached_property + def anthropic(self) -> AsyncAnthropicResource: + return AsyncAnthropicResource(self._client) + + @cached_property + def openai(self) -> AsyncOpenAIResource: + return AsyncOpenAIResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncProvidersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncProvidersResourceWithStreamingResponse(self) + + +class ProvidersResourceWithRawResponse: + def __init__(self, providers: ProvidersResource) -> None: + self._providers = providers + + @cached_property + def anthropic(self) -> AnthropicResourceWithRawResponse: + return AnthropicResourceWithRawResponse(self._providers.anthropic) + + @cached_property + def openai(self) -> OpenAIResourceWithRawResponse: + return OpenAIResourceWithRawResponse(self._providers.openai) + + +class AsyncProvidersResourceWithRawResponse: + def __init__(self, providers: AsyncProvidersResource) -> None: + self._providers = providers + + @cached_property + def anthropic(self) -> AsyncAnthropicResourceWithRawResponse: + return AsyncAnthropicResourceWithRawResponse(self._providers.anthropic) + + @cached_property + def openai(self) -> AsyncOpenAIResourceWithRawResponse: + return AsyncOpenAIResourceWithRawResponse(self._providers.openai) + + +class ProvidersResourceWithStreamingResponse: + def __init__(self, providers: ProvidersResource) -> None: + self._providers = providers + + @cached_property + def anthropic(self) -> AnthropicResourceWithStreamingResponse: + return AnthropicResourceWithStreamingResponse(self._providers.anthropic) + + @cached_property + def openai(self) -> OpenAIResourceWithStreamingResponse: + return OpenAIResourceWithStreamingResponse(self._providers.openai) + + +class AsyncProvidersResourceWithStreamingResponse: + def __init__(self, providers: AsyncProvidersResource) -> None: + self._providers = providers + + @cached_property + def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse: + return AsyncAnthropicResourceWithStreamingResponse(self._providers.anthropic) + + @cached_property + def openai(self) -> AsyncOpenAIResourceWithStreamingResponse: + return AsyncOpenAIResourceWithStreamingResponse(self._providers.openai) diff --git 
a/src/gradientai/resources/regions.py b/src/gradientai/resources/regions.py new file mode 100644 index 00000000..ad760c24 --- /dev/null +++ b/src/gradientai/resources/regions.py @@ -0,0 +1,191 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..types import region_list_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .._base_client import make_request_options +from ..types.region_list_response import RegionListResponse + +__all__ = ["RegionsResource", "AsyncRegionsResource"] + + +class RegionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> RegionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return RegionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> RegionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return RegionsResourceWithStreamingResponse(self) + + def list( + self, + *, + serves_batch: bool | NotGiven = NOT_GIVEN, + serves_inference: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RegionListResponse: + """ + To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. + + Args: + serves_batch: include datacenters that are capable of running batch jobs. + + serves_inference: include datacenters that serve inference. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/regions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "serves_batch": serves_batch, + "serves_inference": serves_inference, + }, + region_list_params.RegionListParams, + ), + ), + cast_to=RegionListResponse, + ) + + +class AsyncRegionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncRegionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncRegionsResourceWithStreamingResponse(self) + + async def list( + self, + *, + serves_batch: bool | NotGiven = NOT_GIVEN, + serves_inference: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RegionListResponse: + """ + To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. + + Args: + serves_batch: include datacenters that are capable of running batch jobs. + + serves_inference: include datacenters that serve inference. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/regions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "serves_batch": serves_batch, + "serves_inference": serves_inference, + }, + region_list_params.RegionListParams, + ), + ), + cast_to=RegionListResponse, + ) + + +class RegionsResourceWithRawResponse: + def __init__(self, regions: RegionsResource) -> None: + self._regions = regions + + self.list = to_raw_response_wrapper( + regions.list, + ) + + +class AsyncRegionsResourceWithRawResponse: + def __init__(self, regions: AsyncRegionsResource) -> None: + self._regions = regions + + self.list = async_to_raw_response_wrapper( + regions.list, + ) + + +class 
RegionsResourceWithStreamingResponse: + def __init__(self, regions: RegionsResource) -> None: + self._regions = regions + + self.list = to_streamed_response_wrapper( + regions.list, + ) + + +class AsyncRegionsResourceWithStreamingResponse: + def __init__(self, regions: AsyncRegionsResource) -> None: + self._regions = regions + + self.list = async_to_streamed_response_wrapper( + regions.list, + ) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index cb52748c..ee516f83 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -10,29 +10,43 @@ from .agent_list_params import AgentListParams as AgentListParams from .api_model_version import APIModelVersion as APIModelVersion from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase +from .region_list_params import RegionListParams as RegionListParams from .agent_create_params import AgentCreateParams as AgentCreateParams from .agent_list_response import AgentListResponse as AgentListResponse +from .agent_update_params import AgentUpdateParams as AgentUpdateParams +from .api_key_list_params import APIKeyListParams as APIKeyListParams from .model_list_response import ModelListResponse as ModelListResponse from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod +from .region_list_response import RegionListResponse as RegionListResponse from .agent_create_response import AgentCreateResponse as AgentCreateResponse +from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse +from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse +from .api_key_list_response import APIKeyListResponse as APIKeyListResponse from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo +from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo from .embedding_create_params import 
EmbeddingCreateParams as EmbeddingCreateParams from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse +from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams +from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse +from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob from .chat_create_completion_params import ChatCreateCompletionParams as ChatCreateCompletionParams from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse +from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse +from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse from .chat_create_completion_response import ChatCreateCompletionResponse as ChatCreateCompletionResponse +from 
.knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse from .indexing_job_retrieve_data_sources_response import ( diff --git a/src/gradientai/types/agent_delete_response.py b/src/gradientai/types/agent_delete_response.py new file mode 100644 index 00000000..eb1d440d --- /dev/null +++ b/src/gradientai/types/agent_delete_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["AgentDeleteResponse"] + + +class AgentDeleteResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_retrieve_response.py b/src/gradientai/types/agent_retrieve_response.py new file mode 100644 index 00000000..2eed88af --- /dev/null +++ b/src/gradientai/types/agent_retrieve_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["AgentRetrieveResponse"] + + +class AgentRetrieveResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_update_params.py b/src/gradientai/types/agent_update_params.py new file mode 100644 index 00000000..85f9a9c2 --- /dev/null +++ b/src/gradientai/types/agent_update_params.py @@ -0,0 +1,65 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List +from typing_extensions import Annotated, TypedDict + +from .._utils import PropertyInfo +from .api_retrieval_method import APIRetrievalMethod + +__all__ = ["AgentUpdateParams"] + + +class AgentUpdateParams(TypedDict, total=False): + anthropic_key_uuid: str + + description: str + + instruction: str + """Agent instruction. + + Instructions help your agent to perform its job effectively. See + [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) + for best practices. + """ + + k: int + + max_tokens: int + """ + Specifies the maximum number of tokens the model can process in a single input + or output, set as a number between 1 and 512. This determines the length of each + response. + """ + + model_uuid: str + """Identifier for the foundation model.""" + + name: str + + openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")] + + project_id: str + + provide_citations: bool + + retrieval_method: APIRetrievalMethod + + tags: List[str] + + temperature: float + """Controls the model’s creativity, specified as a number between 0 and 1. + + Lower values produce more predictable and conservative responses, while higher + values encourage creativity and variation. + """ + + top_p: float + """ + Defines the cumulative probability threshold for word selection, specified as a + number between 0 and 1. Higher values allow for more diverse outputs, while + lower values ensure focused and coherent responses. + """ + + body_uuid: Annotated[str, PropertyInfo(alias="uuid")] diff --git a/src/gradientai/types/agent_update_response.py b/src/gradientai/types/agent_update_response.py new file mode 100644 index 00000000..2948aa1c --- /dev/null +++ b/src/gradientai/types/agent_update_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["AgentUpdateResponse"] + + +class AgentUpdateResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_update_status_params.py b/src/gradientai/types/agent_update_status_params.py new file mode 100644 index 00000000..a0cdc0b9 --- /dev/null +++ b/src/gradientai/types/agent_update_status_params.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from .._utils import PropertyInfo +from .api_deployment_visibility import APIDeploymentVisibility + +__all__ = ["AgentUpdateStatusParams"] + + +class AgentUpdateStatusParams(TypedDict, total=False): + body_uuid: Annotated[str, PropertyInfo(alias="uuid")] + + visibility: APIDeploymentVisibility diff --git a/src/gradientai/types/agent_update_status_response.py b/src/gradientai/types/agent_update_status_response.py new file mode 100644 index 00000000..b200f99d --- /dev/null +++ b/src/gradientai/types/agent_update_status_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["AgentUpdateStatusResponse"] + + +class AgentUpdateStatusResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent diff --git a/src/gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py index 2a7a830e..aae0ee6b 100644 --- a/src/gradientai/types/agents/__init__.py +++ b/src/gradientai/types/agents/__init__.py @@ -11,14 +11,21 @@ from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams from .version_list_response import VersionListResponse as VersionListResponse from .version_update_params import VersionUpdateParams as VersionUpdateParams +from .child_agent_add_params import ChildAgentAddParams as ChildAgentAddParams from .function_create_params import FunctionCreateParams as FunctionCreateParams from .function_update_params import FunctionUpdateParams as FunctionUpdateParams from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse from .version_update_response import VersionUpdateResponse as VersionUpdateResponse +from .child_agent_add_response import ChildAgentAddResponse as ChildAgentAddResponse from .function_create_response import FunctionCreateResponse as FunctionCreateResponse from .function_delete_response import FunctionDeleteResponse as FunctionDeleteResponse from .function_update_response import FunctionUpdateResponse as FunctionUpdateResponse +from .child_agent_update_params import ChildAgentUpdateParams as ChildAgentUpdateParams +from .child_agent_view_response import ChildAgentViewResponse as ChildAgentViewResponse from .api_key_regenerate_response import APIKeyRegenerateResponse as APIKeyRegenerateResponse +from .child_agent_delete_response import ChildAgentDeleteResponse 
as ChildAgentDeleteResponse +from .child_agent_update_response import ChildAgentUpdateResponse as ChildAgentUpdateResponse from .api_link_knowledge_base_output import APILinkKnowledgeBaseOutput as APILinkKnowledgeBaseOutput +from .knowledge_base_detach_response import KnowledgeBaseDetachResponse as KnowledgeBaseDetachResponse diff --git a/src/gradientai/types/agents/child_agent_add_params.py b/src/gradientai/types/agents/child_agent_add_params.py new file mode 100644 index 00000000..001baa6f --- /dev/null +++ b/src/gradientai/types/agents/child_agent_add_params.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["ChildAgentAddParams"] + + +class ChildAgentAddParams(TypedDict, total=False): + path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] + + body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] + + if_case: str + + body_parent_agent_uuid: Annotated[str, PropertyInfo(alias="parent_agent_uuid")] + """A unique identifier for the parent agent.""" + + route_name: str diff --git a/src/gradientai/types/agents/child_agent_add_response.py b/src/gradientai/types/agents/child_agent_add_response.py new file mode 100644 index 00000000..baccec10 --- /dev/null +++ b/src/gradientai/types/agents/child_agent_add_response.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["ChildAgentAddResponse"] + + +class ChildAgentAddResponse(BaseModel): + child_agent_uuid: Optional[str] = None + + parent_agent_uuid: Optional[str] = None + """A unique identifier for the parent agent.""" diff --git a/src/gradientai/types/agents/child_agent_delete_response.py b/src/gradientai/types/agents/child_agent_delete_response.py new file mode 100644 index 00000000..b50fb024 --- /dev/null +++ b/src/gradientai/types/agents/child_agent_delete_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["ChildAgentDeleteResponse"] + + +class ChildAgentDeleteResponse(BaseModel): + child_agent_uuid: Optional[str] = None + + parent_agent_uuid: Optional[str] = None diff --git a/src/gradientai/types/agents/child_agent_update_params.py b/src/gradientai/types/agents/child_agent_update_params.py new file mode 100644 index 00000000..2f009a52 --- /dev/null +++ b/src/gradientai/types/agents/child_agent_update_params.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Required, Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["ChildAgentUpdateParams"] + + +class ChildAgentUpdateParams(TypedDict, total=False): + path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] + + body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] + + if_case: str + + body_parent_agent_uuid: Annotated[str, PropertyInfo(alias="parent_agent_uuid")] + """A unique identifier for the parent agent.""" + + route_name: str + + uuid: str diff --git a/src/gradientai/types/agents/child_agent_update_response.py b/src/gradientai/types/agents/child_agent_update_response.py new file mode 100644 index 00000000..48a13c72 --- /dev/null +++ b/src/gradientai/types/agents/child_agent_update_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["ChildAgentUpdateResponse"] + + +class ChildAgentUpdateResponse(BaseModel): + child_agent_uuid: Optional[str] = None + + parent_agent_uuid: Optional[str] = None + """A unique identifier for the parent agent.""" + + rollback: Optional[bool] = None + + uuid: Optional[str] = None diff --git a/src/gradientai/types/agents/child_agent_view_response.py b/src/gradientai/types/agents/child_agent_view_response.py new file mode 100644 index 00000000..ffbaef12 --- /dev/null +++ b/src/gradientai/types/agents/child_agent_view_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Optional + +from ..._models import BaseModel + +__all__ = ["ChildAgentViewResponse"] + + +class ChildAgentViewResponse(BaseModel): + children: Optional[List["APIAgent"]] = None + + +from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/knowledge_base_detach_response.py b/src/gradientai/types/agents/knowledge_base_detach_response.py new file mode 100644 index 00000000..76bb4236 --- /dev/null +++ b/src/gradientai/types/agents/knowledge_base_detach_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["KnowledgeBaseDetachResponse"] + + +class KnowledgeBaseDetachResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from ..api_agent import APIAgent diff --git a/src/gradientai/types/api_key_list_params.py b/src/gradientai/types/api_key_list_params.py new file mode 100644 index 00000000..a1ab60dc --- /dev/null +++ b/src/gradientai/types/api_key_list_params.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, TypedDict + +__all__ = ["APIKeyListParams"] + + +class APIKeyListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" + + public_only: bool + """only include models that are publicly available.""" + + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + """include only models defined for the listed usecases. 
+ + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model maybe used in an agent + - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + """ diff --git a/src/gradientai/types/api_key_list_response.py b/src/gradientai/types/api_key_list_response.py new file mode 100644 index 00000000..360de7a4 --- /dev/null +++ b/src/gradientai/types/api_key_list_response.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime + +from .._models import BaseModel +from .api_agreement import APIAgreement +from .agents.api_meta import APIMeta +from .agents.api_links import APILinks +from .api_model_version import APIModelVersion + +__all__ = ["APIKeyListResponse", "Model"] + + +class Model(BaseModel): + agreement: Optional[APIAgreement] = None + + created_at: Optional[datetime] = None + + is_foundational: Optional[bool] = None + + name: Optional[str] = None + + parent_uuid: Optional[str] = None + + updated_at: Optional[datetime] = None + + upload_complete: Optional[bool] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + version: Optional[APIModelVersion] = None + + +class APIKeyListResponse(BaseModel): + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None + + models: Optional[List[Model]] = None diff --git a/src/gradientai/types/api_keys/__init__.py b/src/gradientai/types/api_keys/__init__.py index f8ee8b14..c3cbcd6d 100644 --- a/src/gradientai/types/api_keys/__init__.py +++ b/src/gradientai/types/api_keys/__init__.py @@ -1,3 +1,13 @@ # File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. from __future__ import annotations + +from .api_key_list_params import APIKeyListParams as APIKeyListParams +from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams +from .api_key_list_response import APIKeyListResponse as APIKeyListResponse +from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams +from .api_model_api_key_info import APIModelAPIKeyInfo as APIModelAPIKeyInfo +from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse +from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse +from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse +from .api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse as APIKeyUpdateRegenerateResponse diff --git a/src/gradientai/types/api_keys/api_key_create_params.py b/src/gradientai/types/api_keys/api_key_create_params.py new file mode 100644 index 00000000..16cc23c9 --- /dev/null +++ b/src/gradientai/types/api_keys/api_key_create_params.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["APIKeyCreateParams"] + + +class APIKeyCreateParams(TypedDict, total=False): + name: str diff --git a/src/gradientai/types/api_keys/api_key_create_response.py b/src/gradientai/types/api_keys/api_key_create_response.py new file mode 100644 index 00000000..654e9f1e --- /dev/null +++ b/src/gradientai/types/api_keys/api_key_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel +from .api_model_api_key_info import APIModelAPIKeyInfo + +__all__ = ["APIKeyCreateResponse"] + + +class APIKeyCreateResponse(BaseModel): + api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/gradientai/types/api_keys/api_key_delete_response.py b/src/gradientai/types/api_keys/api_key_delete_response.py new file mode 100644 index 00000000..4d81d047 --- /dev/null +++ b/src/gradientai/types/api_keys/api_key_delete_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .api_model_api_key_info import APIModelAPIKeyInfo + +__all__ = ["APIKeyDeleteResponse"] + + +class APIKeyDeleteResponse(BaseModel): + api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/gradientai/types/api_keys/api_key_list_params.py b/src/gradientai/types/api_keys/api_key_list_params.py new file mode 100644 index 00000000..11da9398 --- /dev/null +++ b/src/gradientai/types/api_keys/api_key_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["APIKeyListParams"] + + +class APIKeyListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/gradientai/types/api_keys/api_key_list_response.py b/src/gradientai/types/api_keys/api_key_list_response.py new file mode 100644 index 00000000..535e2f96 --- /dev/null +++ b/src/gradientai/types/api_keys/api_key_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from ..._models import BaseModel +from ..agents.api_meta import APIMeta +from ..agents.api_links import APILinks +from .api_model_api_key_info import APIModelAPIKeyInfo + +__all__ = ["APIKeyListResponse"] + + +class APIKeyListResponse(BaseModel): + api_key_infos: Optional[List[APIModelAPIKeyInfo]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/gradientai/types/api_keys/api_key_update_params.py b/src/gradientai/types/api_keys/api_key_update_params.py new file mode 100644 index 00000000..23c1c0b9 --- /dev/null +++ b/src/gradientai/types/api_keys/api_key_update_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ..._utils import PropertyInfo + +__all__ = ["APIKeyUpdateParams"] + + +class APIKeyUpdateParams(TypedDict, total=False): + body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + + name: str diff --git a/src/gradientai/types/api_keys/api_key_update_regenerate_response.py b/src/gradientai/types/api_keys/api_key_update_regenerate_response.py new file mode 100644 index 00000000..44a316dc --- /dev/null +++ b/src/gradientai/types/api_keys/api_key_update_regenerate_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel +from .api_model_api_key_info import APIModelAPIKeyInfo + +__all__ = ["APIKeyUpdateRegenerateResponse"] + + +class APIKeyUpdateRegenerateResponse(BaseModel): + api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/gradientai/types/api_keys/api_key_update_response.py b/src/gradientai/types/api_keys/api_key_update_response.py new file mode 100644 index 00000000..3671addf --- /dev/null +++ b/src/gradientai/types/api_keys/api_key_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .api_model_api_key_info import APIModelAPIKeyInfo + +__all__ = ["APIKeyUpdateResponse"] + + +class APIKeyUpdateResponse(BaseModel): + api_key_info: Optional[APIModelAPIKeyInfo] = None diff --git a/src/gradientai/types/api_keys/api_model_api_key_info.py b/src/gradientai/types/api_keys/api_model_api_key_info.py new file mode 100644 index 00000000..bf354a47 --- /dev/null +++ b/src/gradientai/types/api_keys/api_model_api_key_info.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime + +from ..._models import BaseModel + +__all__ = ["APIModelAPIKeyInfo"] + + +class APIModelAPIKeyInfo(BaseModel): + created_at: Optional[datetime] = None + + created_by: Optional[str] = None + + deleted_at: Optional[datetime] = None + + name: Optional[str] = None + + secret_key: Optional[str] = None + + uuid: Optional[str] = None diff --git a/src/gradientai/types/auth/agents/__init__.py b/src/gradientai/types/auth/agents/__init__.py index f8ee8b14..9fae55b6 100644 --- a/src/gradientai/types/auth/agents/__init__.py +++ b/src/gradientai/types/auth/agents/__init__.py @@ -1,3 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from __future__ import annotations + +from .token_create_params import TokenCreateParams as TokenCreateParams +from .token_create_response import TokenCreateResponse as TokenCreateResponse diff --git a/src/gradientai/types/auth/agents/token_create_params.py b/src/gradientai/types/auth/agents/token_create_params.py new file mode 100644 index 00000000..0df640f9 --- /dev/null +++ b/src/gradientai/types/auth/agents/token_create_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ...._utils import PropertyInfo + +__all__ = ["TokenCreateParams"] + + +class TokenCreateParams(TypedDict, total=False): + body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] diff --git a/src/gradientai/types/auth/agents/token_create_response.py b/src/gradientai/types/auth/agents/token_create_response.py new file mode 100644 index 00000000..e58b7399 --- /dev/null +++ b/src/gradientai/types/auth/agents/token_create_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["TokenCreateResponse"] + + +class TokenCreateResponse(BaseModel): + access_token: Optional[str] = None + + refresh_token: Optional[str] = None diff --git a/src/gradientai/types/knowledge_base_delete_response.py b/src/gradientai/types/knowledge_base_delete_response.py new file mode 100644 index 00000000..6401e25a --- /dev/null +++ b/src/gradientai/types/knowledge_base_delete_response.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from .._models import BaseModel + +__all__ = ["KnowledgeBaseDeleteResponse"] + + +class KnowledgeBaseDeleteResponse(BaseModel): + uuid: Optional[str] = None diff --git a/src/gradientai/types/knowledge_base_retrieve_response.py b/src/gradientai/types/knowledge_base_retrieve_response.py new file mode 100644 index 00000000..5a3b5f2c --- /dev/null +++ b/src/gradientai/types/knowledge_base_retrieve_response.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from .._models import BaseModel +from .api_knowledge_base import APIKnowledgeBase + +__all__ = ["KnowledgeBaseRetrieveResponse"] + + +class KnowledgeBaseRetrieveResponse(BaseModel): + database_status: Optional[ + Literal[ + "CREATING", + "ONLINE", + "POWEROFF", + "REBUILDING", + "REBALANCING", + "DECOMMISSIONED", + "FORKING", + "MIGRATING", + "RESIZING", + "RESTORING", + "POWERING_ON", + "UNHEALTHY", + ] + ] = None + + knowledge_base: Optional[APIKnowledgeBase] = None diff --git a/src/gradientai/types/knowledge_base_update_params.py b/src/gradientai/types/knowledge_base_update_params.py new file mode 100644 index 00000000..297c79de --- /dev/null +++ b/src/gradientai/types/knowledge_base_update_params.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List +from typing_extensions import Annotated, TypedDict + +from .._utils import PropertyInfo + +__all__ = ["KnowledgeBaseUpdateParams"] + + +class KnowledgeBaseUpdateParams(TypedDict, total=False): + database_id: str + """the id of the DigitalOcean database this knowledge base will use, optiona.""" + + embedding_model_uuid: str + """Identifier for the foundation model.""" + + name: str + + project_id: str + + tags: List[str] + """Tags to organize your knowledge base.""" + + body_uuid: Annotated[str, PropertyInfo(alias="uuid")] diff --git a/src/gradientai/types/knowledge_base_update_response.py b/src/gradientai/types/knowledge_base_update_response.py new file mode 100644 index 00000000..f3ba2c32 --- /dev/null +++ b/src/gradientai/types/knowledge_base_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from .._models import BaseModel +from .api_knowledge_base import APIKnowledgeBase + +__all__ = ["KnowledgeBaseUpdateResponse"] + + +class KnowledgeBaseUpdateResponse(BaseModel): + knowledge_base: Optional[APIKnowledgeBase] = None diff --git a/src/gradientai/types/knowledge_bases/__init__.py b/src/gradientai/types/knowledge_bases/__init__.py index e716e1f6..f5f31034 100644 --- a/src/gradientai/types/knowledge_bases/__init__.py +++ b/src/gradientai/types/knowledge_bases/__init__.py @@ -9,6 +9,7 @@ from .api_file_upload_data_source import APIFileUploadDataSource as APIFileUploadDataSource from .api_web_crawler_data_source import APIWebCrawlerDataSource as APIWebCrawlerDataSource from .data_source_create_response import DataSourceCreateResponse as DataSourceCreateResponse +from .data_source_delete_response import DataSourceDeleteResponse as DataSourceDeleteResponse from .api_spaces_data_source_param import APISpacesDataSourceParam as APISpacesDataSourceParam from .api_knowledge_base_data_source import 
APIKnowledgeBaseDataSource as APIKnowledgeBaseDataSource from .api_file_upload_data_source_param import APIFileUploadDataSourceParam as APIFileUploadDataSourceParam diff --git a/src/gradientai/types/knowledge_bases/data_source_delete_response.py b/src/gradientai/types/knowledge_bases/data_source_delete_response.py new file mode 100644 index 00000000..53954d7f --- /dev/null +++ b/src/gradientai/types/knowledge_bases/data_source_delete_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["DataSourceDeleteResponse"] + + +class DataSourceDeleteResponse(BaseModel): + data_source_uuid: Optional[str] = None + + knowledge_base_uuid: Optional[str] = None diff --git a/src/gradientai/types/providers/anthropic/__init__.py b/src/gradientai/types/providers/anthropic/__init__.py index f8ee8b14..eb47e709 100644 --- a/src/gradientai/types/providers/anthropic/__init__.py +++ b/src/gradientai/types/providers/anthropic/__init__.py @@ -1,3 +1,14 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from __future__ import annotations + +from .key_list_params import KeyListParams as KeyListParams +from .key_create_params import KeyCreateParams as KeyCreateParams +from .key_list_response import KeyListResponse as KeyListResponse +from .key_update_params import KeyUpdateParams as KeyUpdateParams +from .key_create_response import KeyCreateResponse as KeyCreateResponse +from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse +from .key_update_response import KeyUpdateResponse as KeyUpdateResponse +from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse +from .key_list_agents_params import KeyListAgentsParams as KeyListAgentsParams +from .key_list_agents_response import KeyListAgentsResponse as KeyListAgentsResponse diff --git a/src/gradientai/types/providers/anthropic/key_create_params.py b/src/gradientai/types/providers/anthropic/key_create_params.py new file mode 100644 index 00000000..389f167c --- /dev/null +++ b/src/gradientai/types/providers/anthropic/key_create_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyCreateParams"] + + +class KeyCreateParams(TypedDict, total=False): + api_key: str + + name: str diff --git a/src/gradientai/types/providers/anthropic/key_create_response.py b/src/gradientai/types/providers/anthropic/key_create_response.py new file mode 100644 index 00000000..a032810c --- /dev/null +++ b/src/gradientai/types/providers/anthropic/key_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ...._models import BaseModel +from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyCreateResponse"] + + +class KeyCreateResponse(BaseModel): + api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/gradientai/types/providers/anthropic/key_delete_response.py b/src/gradientai/types/providers/anthropic/key_delete_response.py new file mode 100644 index 00000000..2afe2dda --- /dev/null +++ b/src/gradientai/types/providers/anthropic/key_delete_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyDeleteResponse"] + + +class KeyDeleteResponse(BaseModel): + api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/gradientai/types/providers/anthropic/key_list_agents_params.py b/src/gradientai/types/providers/anthropic/key_list_agents_params.py new file mode 100644 index 00000000..ebbc3b7e --- /dev/null +++ b/src/gradientai/types/providers/anthropic/key_list_agents_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyListAgentsParams"] + + +class KeyListAgentsParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/gradientai/types/providers/anthropic/key_list_agents_response.py b/src/gradientai/types/providers/anthropic/key_list_agents_response.py new file mode 100644 index 00000000..ba6ca946 --- /dev/null +++ b/src/gradientai/types/providers/anthropic/key_list_agents_response.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Optional + +from ...._models import BaseModel +from ...agents.api_meta import APIMeta +from ...agents.api_links import APILinks + +__all__ = ["KeyListAgentsResponse"] + + +class KeyListAgentsResponse(BaseModel): + agents: Optional[List["APIAgent"]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None + + +from ...api_agent import APIAgent diff --git a/src/gradientai/types/providers/anthropic/key_list_params.py b/src/gradientai/types/providers/anthropic/key_list_params.py new file mode 100644 index 00000000..a11458ad --- /dev/null +++ b/src/gradientai/types/providers/anthropic/key_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyListParams"] + + +class KeyListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/gradientai/types/providers/anthropic/key_list_response.py b/src/gradientai/types/providers/anthropic/key_list_response.py new file mode 100644 index 00000000..d0b84e96 --- /dev/null +++ b/src/gradientai/types/providers/anthropic/key_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from ...._models import BaseModel +from ...agents.api_meta import APIMeta +from ...agents.api_links import APILinks +from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyListResponse"] + + +class KeyListResponse(BaseModel): + api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/gradientai/types/providers/anthropic/key_retrieve_response.py b/src/gradientai/types/providers/anthropic/key_retrieve_response.py new file mode 100644 index 00000000..b8361fc2 --- /dev/null +++ b/src/gradientai/types/providers/anthropic/key_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyRetrieveResponse"] + + +class KeyRetrieveResponse(BaseModel): + api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/gradientai/types/providers/anthropic/key_update_params.py b/src/gradientai/types/providers/anthropic/key_update_params.py new file mode 100644 index 00000000..c07d7f66 --- /dev/null +++ b/src/gradientai/types/providers/anthropic/key_update_params.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ...._utils import PropertyInfo + +__all__ = ["KeyUpdateParams"] + + +class KeyUpdateParams(TypedDict, total=False): + api_key: str + + body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + + name: str diff --git a/src/gradientai/types/providers/anthropic/key_update_response.py b/src/gradientai/types/providers/anthropic/key_update_response.py new file mode 100644 index 00000000..b04277a6 --- /dev/null +++ b/src/gradientai/types/providers/anthropic/key_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo + +__all__ = ["KeyUpdateResponse"] + + +class KeyUpdateResponse(BaseModel): + api_key_info: Optional[APIAnthropicAPIKeyInfo] = None diff --git a/src/gradientai/types/providers/openai/__init__.py b/src/gradientai/types/providers/openai/__init__.py index f8ee8b14..70abf332 100644 --- a/src/gradientai/types/providers/openai/__init__.py +++ b/src/gradientai/types/providers/openai/__init__.py @@ -1,3 +1,14 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from __future__ import annotations + +from .key_list_params import KeyListParams as KeyListParams +from .key_create_params import KeyCreateParams as KeyCreateParams +from .key_list_response import KeyListResponse as KeyListResponse +from .key_update_params import KeyUpdateParams as KeyUpdateParams +from .key_create_response import KeyCreateResponse as KeyCreateResponse +from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse +from .key_update_response import KeyUpdateResponse as KeyUpdateResponse +from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse +from .key_retrieve_agents_params import KeyRetrieveAgentsParams as KeyRetrieveAgentsParams +from .key_retrieve_agents_response import KeyRetrieveAgentsResponse as KeyRetrieveAgentsResponse diff --git a/src/gradientai/types/providers/openai/key_create_params.py b/src/gradientai/types/providers/openai/key_create_params.py new file mode 100644 index 00000000..389f167c --- /dev/null +++ b/src/gradientai/types/providers/openai/key_create_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyCreateParams"] + + +class KeyCreateParams(TypedDict, total=False): + api_key: str + + name: str diff --git a/src/gradientai/types/providers/openai/key_create_response.py b/src/gradientai/types/providers/openai/key_create_response.py new file mode 100644 index 00000000..f3b4d36c --- /dev/null +++ b/src/gradientai/types/providers/openai/key_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ...._models import BaseModel +from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyCreateResponse"] + + +class KeyCreateResponse(BaseModel): + api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/gradientai/types/providers/openai/key_delete_response.py b/src/gradientai/types/providers/openai/key_delete_response.py new file mode 100644 index 00000000..0c8922bb --- /dev/null +++ b/src/gradientai/types/providers/openai/key_delete_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyDeleteResponse"] + + +class KeyDeleteResponse(BaseModel): + api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/gradientai/types/providers/openai/key_list_params.py b/src/gradientai/types/providers/openai/key_list_params.py new file mode 100644 index 00000000..a11458ad --- /dev/null +++ b/src/gradientai/types/providers/openai/key_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyListParams"] + + +class KeyListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/gradientai/types/providers/openai/key_list_response.py b/src/gradientai/types/providers/openai/key_list_response.py new file mode 100644 index 00000000..c263cba3 --- /dev/null +++ b/src/gradientai/types/providers/openai/key_list_response.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from ...._models import BaseModel +from ...agents.api_meta import APIMeta +from ...agents.api_links import APILinks +from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyListResponse"] + + +class KeyListResponse(BaseModel): + api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None diff --git a/src/gradientai/types/providers/openai/key_retrieve_agents_params.py b/src/gradientai/types/providers/openai/key_retrieve_agents_params.py new file mode 100644 index 00000000..ec745d14 --- /dev/null +++ b/src/gradientai/types/providers/openai/key_retrieve_agents_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyRetrieveAgentsParams"] + + +class KeyRetrieveAgentsParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" diff --git a/src/gradientai/types/providers/openai/key_retrieve_agents_response.py b/src/gradientai/types/providers/openai/key_retrieve_agents_response.py new file mode 100644 index 00000000..f42edea6 --- /dev/null +++ b/src/gradientai/types/providers/openai/key_retrieve_agents_response.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Optional + +from ...._models import BaseModel +from ...agents.api_meta import APIMeta +from ...agents.api_links import APILinks + +__all__ = ["KeyRetrieveAgentsResponse"] + + +class KeyRetrieveAgentsResponse(BaseModel): + agents: Optional[List["APIAgent"]] = None + + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None + + +from ...api_agent import APIAgent diff --git a/src/gradientai/types/providers/openai/key_retrieve_response.py b/src/gradientai/types/providers/openai/key_retrieve_response.py new file mode 100644 index 00000000..7015b6f7 --- /dev/null +++ b/src/gradientai/types/providers/openai/key_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyRetrieveResponse"] + + +class KeyRetrieveResponse(BaseModel): + api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/gradientai/types/providers/openai/key_update_params.py b/src/gradientai/types/providers/openai/key_update_params.py new file mode 100644 index 00000000..c07d7f66 --- /dev/null +++ b/src/gradientai/types/providers/openai/key_update_params.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Annotated, TypedDict + +from ...._utils import PropertyInfo + +__all__ = ["KeyUpdateParams"] + + +class KeyUpdateParams(TypedDict, total=False): + api_key: str + + body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + + name: str diff --git a/src/gradientai/types/providers/openai/key_update_response.py b/src/gradientai/types/providers/openai/key_update_response.py new file mode 100644 index 00000000..4889f994 --- /dev/null +++ b/src/gradientai/types/providers/openai/key_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo + +__all__ = ["KeyUpdateResponse"] + + +class KeyUpdateResponse(BaseModel): + api_key_info: Optional[APIOpenAIAPIKeyInfo] = None diff --git a/src/gradientai/types/region_list_params.py b/src/gradientai/types/region_list_params.py new file mode 100644 index 00000000..1db0ad50 --- /dev/null +++ b/src/gradientai/types/region_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["RegionListParams"] + + +class RegionListParams(TypedDict, total=False): + serves_batch: bool + """include datacenters that are capable of running batch jobs.""" + + serves_inference: bool + """include datacenters that serve inference.""" diff --git a/src/gradientai/types/region_list_response.py b/src/gradientai/types/region_list_response.py new file mode 100644 index 00000000..0f955b36 --- /dev/null +++ b/src/gradientai/types/region_list_response.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from .._models import BaseModel + +__all__ = ["RegionListResponse", "Region"] + + +class Region(BaseModel): + inference_url: Optional[str] = None + + region: Optional[str] = None + + serves_batch: Optional[bool] = None + + serves_inference: Optional[bool] = None + + stream_inference_url: Optional[str] = None + + +class RegionListResponse(BaseModel): + regions: Optional[List[Region]] = None diff --git a/tests/api_resources/agents/test_child_agents.py b/tests/api_resources/agents/test_child_agents.py new file mode 100644 index 00000000..14af3b93 --- /dev/null +++ b/tests/api_resources/agents/test_child_agents.py @@ -0,0 +1,485 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.agents import ( + ChildAgentAddResponse, + ChildAgentViewResponse, + ChildAgentDeleteResponse, + ChildAgentUpdateResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestChildAgents: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + child_agent = client.agents.child_agents.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) + assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + child_agent = client.agents.child_agents.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + body_child_agent_uuid="child_agent_uuid", + if_case="if_case", + 
body_parent_agent_uuid="parent_agent_uuid", + route_name="route_name", + uuid="uuid", + ) + assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.agents.child_agents.with_raw_response.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + child_agent = response.parse() + assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.agents.child_agents.with_streaming_response.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + child_agent = response.parse() + assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises( + ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" + ): + client.agents.child_agents.with_raw_response.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): + client.agents.child_agents.with_raw_response.update( + path_child_agent_uuid="", + path_parent_agent_uuid="parent_agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + child_agent = client.agents.child_agents.delete( + 
child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", + ) + assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.agents.child_agents.with_raw_response.delete( + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + child_agent = response.parse() + assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.agents.child_agents.with_streaming_response.delete( + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + child_agent = response.parse() + assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): + client.agents.child_agents.with_raw_response.delete( + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): + client.agents.child_agents.with_raw_response.delete( + child_agent_uuid="", + parent_agent_uuid="parent_agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + def test_method_add(self, client: GradientAI) -> None: + child_agent = client.agents.child_agents.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) + 
assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_add_with_all_params(self, client: GradientAI) -> None: + child_agent = client.agents.child_agents.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + body_child_agent_uuid="child_agent_uuid", + if_case="if_case", + body_parent_agent_uuid="parent_agent_uuid", + route_name="route_name", + ) + assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_add(self, client: GradientAI) -> None: + response = client.agents.child_agents.with_raw_response.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + child_agent = response.parse() + assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_add(self, client: GradientAI) -> None: + with client.agents.child_agents.with_streaming_response.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + child_agent = response.parse() + assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_add(self, client: GradientAI) -> None: + with pytest.raises( + ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" + ): + client.agents.child_agents.with_raw_response.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for 
`path_child_agent_uuid` but received ''"): + client.agents.child_agents.with_raw_response.add( + path_child_agent_uuid="", + path_parent_agent_uuid="parent_agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + def test_method_view(self, client: GradientAI) -> None: + child_agent = client.agents.child_agents.view( + "uuid", + ) + assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_view(self, client: GradientAI) -> None: + response = client.agents.child_agents.with_raw_response.view( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + child_agent = response.parse() + assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_view(self, client: GradientAI) -> None: + with client.agents.child_agents.with_streaming_response.view( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + child_agent = response.parse() + assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_view(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.agents.child_agents.with_raw_response.view( + "", + ) + + +class TestAsyncChildAgents: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + child_agent = await async_client.agents.child_agents.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) + 
assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + child_agent = await async_client.agents.child_agents.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + body_child_agent_uuid="child_agent_uuid", + if_case="if_case", + body_parent_agent_uuid="parent_agent_uuid", + route_name="route_name", + uuid="uuid", + ) + assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.child_agents.with_raw_response.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + child_agent = await response.parse() + assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.child_agents.with_streaming_response.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + child_agent = await response.parse() + assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises( + ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" + ): + await 
async_client.agents.child_agents.with_raw_response.update( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): + await async_client.agents.child_agents.with_raw_response.update( + path_child_agent_uuid="", + path_parent_agent_uuid="parent_agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + child_agent = await async_client.agents.child_agents.delete( + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", + ) + assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.child_agents.with_raw_response.delete( + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + child_agent = await response.parse() + assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.child_agents.with_streaming_response.delete( + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + child_agent = await response.parse() + assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, 
match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): + await async_client.agents.child_agents.with_raw_response.delete( + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): + await async_client.agents.child_agents.with_raw_response.delete( + child_agent_uuid="", + parent_agent_uuid="parent_agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_add(self, async_client: AsyncGradientAI) -> None: + child_agent = await async_client.agents.child_agents.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) + assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) -> None: + child_agent = await async_client.agents.child_agents.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + body_child_agent_uuid="child_agent_uuid", + if_case="if_case", + body_parent_agent_uuid="parent_agent_uuid", + route_name="route_name", + ) + assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.child_agents.with_raw_response.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + child_agent = await response.parse() + assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: + async with 
async_client.agents.child_agents.with_streaming_response.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + child_agent = await response.parse() + assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: + with pytest.raises( + ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" + ): + await async_client.agents.child_agents.with_raw_response.add( + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): + await async_client.agents.child_agents.with_raw_response.add( + path_child_agent_uuid="", + path_parent_agent_uuid="parent_agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_view(self, async_client: AsyncGradientAI) -> None: + child_agent = await async_client.agents.child_agents.view( + "uuid", + ) + assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_view(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.child_agents.with_raw_response.view( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + child_agent = await response.parse() + assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_view(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.child_agents.with_streaming_response.view( + 
"uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + child_agent = await response.parse() + assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_view(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.agents.child_agents.with_raw_response.view( + "", + ) diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py index c8b5541d..dff80a9a 100644 --- a/tests/api_resources/agents/test_knowledge_bases.py +++ b/tests/api_resources/agents/test_knowledge_bases.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import APILinkKnowledgeBaseOutput +from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -59,6 +59,110 @@ def test_path_params_attach(self, client: GradientAI) -> None: "", ) + @pytest.mark.skip() + @parametrize + def test_method_attach_single(self, client: GradientAI) -> None: + knowledge_base = client.agents.knowledge_bases.attach_single( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_attach_single(self, client: GradientAI) -> None: + response = client.agents.knowledge_bases.with_raw_response.attach_single( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + 
knowledge_base = response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_attach_single(self, client: GradientAI) -> None: + with client.agents.knowledge_bases.with_streaming_response.attach_single( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_attach_single(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.agents.knowledge_bases.with_raw_response.attach_single( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + client.agents.knowledge_bases.with_raw_response.attach_single( + knowledge_base_uuid="", + agent_uuid="agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + def test_method_detach(self, client: GradientAI) -> None: + knowledge_base = client.agents.knowledge_bases.detach( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_detach(self, client: GradientAI) -> None: + response = client.agents.knowledge_bases.with_raw_response.detach( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + 
assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_detach(self, client: GradientAI) -> None: + with client.agents.knowledge_bases.with_streaming_response.detach( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_detach(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + client.agents.knowledge_bases.with_raw_response.detach( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + client.agents.knowledge_bases.with_raw_response.detach( + knowledge_base_uuid="", + agent_uuid="agent_uuid", + ) + class TestAsyncKnowledgeBases: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -104,3 +208,107 @@ async def test_path_params_attach(self, async_client: AsyncGradientAI) -> None: await async_client.agents.knowledge_bases.with_raw_response.attach( "", ) + + @pytest.mark.skip() + @parametrize + async def test_method_attach_single(self, async_client: AsyncGradientAI) -> None: + knowledge_base = await async_client.agents.knowledge_bases.attach_single( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) -> 
None: + response = await async_client.agents.knowledge_bases.with_raw_response.attach_single( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_attach_single(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.knowledge_bases.with_streaming_response.attach_single( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_attach_single(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.agents.knowledge_bases.with_raw_response.attach_single( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + await async_client.agents.knowledge_bases.with_raw_response.attach_single( + knowledge_base_uuid="", + agent_uuid="agent_uuid", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_detach(self, async_client: AsyncGradientAI) -> None: + knowledge_base = await async_client.agents.knowledge_bases.detach( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) + assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.knowledge_bases.with_raw_response.detach( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_detach(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.knowledge_bases.with_streaming_response.detach( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_detach(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): + await async_client.agents.knowledge_bases.with_raw_response.detach( + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + await async_client.agents.knowledge_bases.with_raw_response.detach( + knowledge_base_uuid="", + agent_uuid="agent_uuid", + ) diff --git a/tests/api_resources/api_keys/__init__.py b/tests/api_resources/api_keys/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/api_keys/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. diff --git a/tests/api_resources/api_keys/test_api_keys_.py b/tests/api_resources/api_keys/test_api_keys_.py new file mode 100644 index 00000000..01e8dcfa --- /dev/null +++ b/tests/api_resources/api_keys/test_api_keys_.py @@ -0,0 +1,446 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.api_keys import ( + APIKeyListResponse, + APIKeyCreateResponse, + APIKeyDeleteResponse, + APIKeyUpdateResponse, + APIKeyUpdateRegenerateResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestAPIKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + api_key = client.api_keys.api_keys.create() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + api_key = client.api_keys.api_keys.create( + name="name", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.api_keys.api_keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.api_keys.api_keys.with_streaming_response.create() as response: + assert not 
response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + api_key = client.api_keys.api_keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + api_key = client.api_keys.api_keys.update( + path_api_key_uuid="api_key_uuid", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.api_keys.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.api_keys.api_keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + 
client.api_keys.api_keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + api_key = client.api_keys.api_keys.list() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + api_key = client.api_keys.api_keys.list( + page=0, + per_page=0, + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.api_keys.api_keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.api_keys.api_keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + api_key = client.api_keys.api_keys.delete( + "api_key_uuid", + ) + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.api_keys.api_keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, 
path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.api_keys.api_keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.api_keys.api_keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update_regenerate(self, client: GradientAI) -> None: + api_key = client.api_keys.api_keys.update_regenerate( + "api_key_uuid", + ) + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update_regenerate(self, client: GradientAI) -> None: + response = client.api_keys.api_keys.with_raw_response.update_regenerate( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update_regenerate(self, client: GradientAI) -> None: + with client.api_keys.api_keys.with_streaming_response.update_regenerate( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + 
@parametrize + def test_path_params_update_regenerate(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.api_keys.api_keys.with_raw_response.update_regenerate( + "", + ) + + +class TestAsyncAPIKeys: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.api_keys.create() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.api_keys.create( + name="name", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.api_keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.api_keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + api_key = await 
async_client.api_keys.api_keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.api_keys.update( + path_api_key_uuid="api_key_uuid", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.api_keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.api_keys.api_keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.api_keys.list() + 
assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.api_keys.list( + page=0, + per_page=0, + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.api_keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.api_keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.api_keys.delete( + "api_key_uuid", + ) + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.api_keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.api_keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.api_keys.api_keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update_regenerate(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.api_keys.update_regenerate( + "api_key_uuid", + ) + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.api_keys.with_raw_response.update_regenerate( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.api_keys.with_streaming_response.update_regenerate( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, 
path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update_regenerate(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.api_keys.api_keys.with_raw_response.update_regenerate( + "", + ) diff --git a/tests/api_resources/auth/__init__.py b/tests/api_resources/auth/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/auth/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/auth/agents/__init__.py b/tests/api_resources/auth/agents/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/auth/agents/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/auth/agents/test_token.py b/tests/api_resources/auth/agents/test_token.py new file mode 100644 index 00000000..ef721cd0 --- /dev/null +++ b/tests/api_resources/auth/agents/test_token.py @@ -0,0 +1,124 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.auth.agents import TokenCreateResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestToken: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + token = client.auth.agents.token.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + token = client.auth.agents.token.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + ) + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.auth.agents.token.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + token = response.parse() + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.auth.agents.token.with_streaming_response.create( + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + token = response.parse() + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: GradientAI) -> None: 
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + client.auth.agents.token.with_raw_response.create( + path_agent_uuid="", + ) + + +class TestAsyncToken: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + token = await async_client.auth.agents.token.create( + path_agent_uuid="agent_uuid", + ) + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + token = await async_client.auth.agents.token.create( + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + ) + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.auth.agents.token.with_raw_response.create( + path_agent_uuid="agent_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + token = await response.parse() + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.auth.agents.token.with_streaming_response.create( + path_agent_uuid="agent_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + token = await response.parse() + assert_matches_type(TokenCreateResponse, token, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncGradientAI) 
-> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): + await async_client.auth.agents.token.with_raw_response.create( + path_agent_uuid="", + ) diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index cc90a9d7..ce9c390e 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -12,6 +12,7 @@ from gradientai.types.knowledge_bases import ( DataSourceListResponse, DataSourceCreateResponse, + DataSourceDeleteResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -142,6 +143,58 @@ def test_path_params_list(self, client: GradientAI) -> None: knowledge_base_uuid="", ) + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + data_source = client.knowledge_bases.data_sources.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = response.parse() + assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.knowledge_bases.data_sources.with_streaming_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = response.parse() + assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + client.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"): + client.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="", + knowledge_base_uuid="knowledge_base_uuid", + ) + class TestAsyncDataSources: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -267,3 +320,55 @@ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: await async_client.knowledge_bases.data_sources.with_raw_response.list( knowledge_base_uuid="", ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + data_source = await async_client.knowledge_bases.data_sources.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) + assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + data_source = await response.parse() + 
assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.knowledge_bases.data_sources.with_streaming_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + data_source = await response.parse() + assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): + await async_client.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="data_source_uuid", + knowledge_base_uuid="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"): + await async_client.knowledge_bases.data_sources.with_raw_response.delete( + data_source_uuid="", + knowledge_base_uuid="knowledge_base_uuid", + ) diff --git a/tests/api_resources/providers/__init__.py b/tests/api_resources/providers/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/providers/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/providers/anthropic/__init__.py b/tests/api_resources/providers/anthropic/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/providers/anthropic/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
diff --git a/tests/api_resources/providers/anthropic/test_keys.py b/tests/api_resources/providers/anthropic/test_keys.py new file mode 100644 index 00000000..fab973bf --- /dev/null +++ b/tests/api_resources/providers/anthropic/test_keys.py @@ -0,0 +1,555 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.providers.anthropic import ( + KeyListResponse, + KeyCreateResponse, + KeyDeleteResponse, + KeyUpdateResponse, + KeyRetrieveResponse, + KeyListAgentsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.create( + api_key="api_key", + name="name", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.providers.anthropic.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.providers.anthropic.keys.with_streaming_response.create() as response: + assert not 
response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.providers.anthropic.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.providers.anthropic.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.providers.anthropic.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> 
None: + key = client.providers.anthropic.keys.update( + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.providers.anthropic.keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.providers.anthropic.keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + client.providers.anthropic.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + 
response = client.providers.anthropic.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.providers.anthropic.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.providers.anthropic.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.providers.anthropic.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` 
but received ''"): + client.providers.anthropic.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list_agents(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.list_agents( + uuid="uuid", + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: + key = client.providers.anthropic.keys.list_agents( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_agents(self, client: GradientAI) -> None: + response = client.providers.anthropic.keys.with_raw_response.list_agents( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_agents(self, client: GradientAI) -> None: + with client.providers.anthropic.keys.with_streaming_response.list_agents( + uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_agents(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.providers.anthropic.keys.with_raw_response.list_agents( + uuid="", + ) + + +class TestAsyncKeys: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def 
test_method_create(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.create( + api_key="api_key", + name="name", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.anthropic.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + 
key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.anthropic.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.providers.anthropic.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.update( + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, 
path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.anthropic.keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.providers.anthropic.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with 
async_client.providers.anthropic.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.anthropic.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.providers.anthropic.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: + key = await 
async_client.providers.anthropic.keys.list_agents( + uuid="uuid", + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.anthropic.keys.list_agents( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.anthropic.keys.with_raw_response.list_agents( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.anthropic.keys.with_streaming_response.list_agents( + uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.providers.anthropic.keys.with_raw_response.list_agents( + uuid="", + ) diff --git a/tests/api_resources/providers/openai/__init__.py b/tests/api_resources/providers/openai/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/providers/openai/__init__.py @@ -0,0 +1 @@ +# File generated from our 
OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/providers/openai/test_keys.py b/tests/api_resources/providers/openai/test_keys.py new file mode 100644 index 00000000..1bb270b1 --- /dev/null +++ b/tests/api_resources/providers/openai/test_keys.py @@ -0,0 +1,555 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.providers.openai import ( + KeyListResponse, + KeyCreateResponse, + KeyDeleteResponse, + KeyUpdateResponse, + KeyRetrieveResponse, + KeyRetrieveAgentsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + key = client.providers.openai.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + key = client.providers.openai.keys.create( + api_key="api_key", + name="name", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with 
client.providers.openai.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + key = client.providers.openai.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.providers.openai.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.providers.openai.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + key = client.providers.openai.keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + 
@parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + key = client.providers.openai.keys.update( + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.providers.openai.keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + client.providers.openai.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + key = client.providers.openai.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + key = client.providers.openai.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + 
def test_raw_response_list(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.providers.openai.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + key = client.providers.openai.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.providers.openai.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, 
match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.providers.openai.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_agents(self, client: GradientAI) -> None: + key = client.providers.openai.keys.retrieve_agents( + uuid="uuid", + ) + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> None: + key = client.providers.openai.keys.retrieve_agents( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve_agents(self, client: GradientAI) -> None: + response = client.providers.openai.keys.with_raw_response.retrieve_agents( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None: + with client.providers.openai.keys.with_streaming_response.retrieve_agents( + uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve_agents(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.providers.openai.keys.with_raw_response.retrieve_agents( + uuid="", + ) + + +class TestAsyncKeys: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, 
ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.create() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.create( + api_key="api_key", + name="name", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.openai.keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.retrieve( + "api_key_uuid", + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.retrieve( + "api_key_uuid", + ) + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.openai.keys.with_streaming_response.retrieve( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.providers.openai.keys.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.update( + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await 
response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.openai.keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.providers.openai.keys.with_raw_response.update( + path_api_key_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.list( + page=0, + per_page=0, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with 
async_client.providers.openai.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.delete( + "api_key_uuid", + ) + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.openai.keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyDeleteResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.providers.openai.keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> None: + key = await 
async_client.providers.openai.keys.retrieve_agents( + uuid="uuid", + ) + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.providers.openai.keys.retrieve_agents( + uuid="uuid", + page=0, + per_page=0, + ) + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: + response = await async_client.providers.openai.keys.with_raw_response.retrieve_agents( + uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: + async with async_client.providers.openai.keys.with_streaming_response.retrieve_agents( + uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve_agents(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.providers.openai.keys.with_raw_response.retrieve_agents( + uuid="", + ) diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index d88d4791..f39ac4d5 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -9,7 +9,14 @@ from gradientai 
import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import AgentListResponse, AgentCreateResponse +from gradientai.types import ( + AgentListResponse, + AgentCreateResponse, + AgentDeleteResponse, + AgentUpdateResponse, + AgentRetrieveResponse, + AgentUpdateStatusResponse, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -62,6 +69,113 @@ def test_streaming_response_create(self, client: GradientAI) -> None: assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + agent = client.agents.retrieve( + "uuid", + ) + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.agents.with_raw_response.retrieve( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.agents.with_streaming_response.retrieve( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.agents.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + agent = client.agents.update( + path_uuid="uuid", 
+ ) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + agent = client.agents.update( + path_uuid="uuid", + anthropic_key_uuid="anthropic_key_uuid", + description="description", + instruction="instruction", + k=0, + max_tokens=0, + model_uuid="model_uuid", + name="name", + openai_key_uuid="open_ai_key_uuid", + project_id="project_id", + provide_citations=True, + retrieval_method="RETRIEVAL_METHOD_UNKNOWN", + tags=["string"], + temperature=0, + top_p=0, + body_uuid="uuid", + ) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.agents.with_raw_response.update( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.agents.with_streaming_response.update( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + client.agents.with_raw_response.update( + path_uuid="", + ) + @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: @@ -100,6 +214,100 @@ def test_streaming_response_list(self, client: GradientAI) -> None: assert cast(Any, 
response.is_closed) is True + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + agent = client.agents.delete( + "uuid", + ) + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.agents.with_raw_response.delete( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.agents.with_streaming_response.delete( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.agents.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update_status(self, client: GradientAI) -> None: + agent = client.agents.update_status( + path_uuid="uuid", + ) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_status_with_all_params(self, client: GradientAI) -> None: + agent = client.agents.update_status( + path_uuid="uuid", + body_uuid="uuid", + visibility="VISIBILITY_UNKNOWN", + ) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update_status(self, client: GradientAI) -> None: + 
response = client.agents.with_raw_response.update_status( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update_status(self, client: GradientAI) -> None: + with client.agents.with_streaming_response.update_status( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update_status(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + client.agents.with_raw_response.update_status( + path_uuid="", + ) + class TestAsyncAgents: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -149,6 +357,113 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.retrieve( + "uuid", + ) + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.with_raw_response.retrieve( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentRetrieveResponse, agent, 
path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.with_streaming_response.retrieve( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.agents.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.update( + path_uuid="uuid", + ) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.update( + path_uuid="uuid", + anthropic_key_uuid="anthropic_key_uuid", + description="description", + instruction="instruction", + k=0, + max_tokens=0, + model_uuid="model_uuid", + name="name", + openai_key_uuid="open_ai_key_uuid", + project_id="project_id", + provide_citations=True, + retrieval_method="RETRIEVAL_METHOD_UNKNOWN", + tags=["string"], + temperature=0, + top_p=0, + body_uuid="uuid", + ) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.with_raw_response.update( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" + agent = await response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.with_streaming_response.update( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + await async_client.agents.with_raw_response.update( + path_uuid="", + ) + @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: @@ -186,3 +501,97 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N assert_matches_type(AgentListResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.delete( + "uuid", + ) + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.with_raw_response.delete( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> 
None: + async with async_client.agents.with_streaming_response.delete( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.agents.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update_status(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.update_status( + path_uuid="uuid", + ) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_status_with_all_params(self, async_client: AsyncGradientAI) -> None: + agent = await async_client.agents.update_status( + path_uuid="uuid", + body_uuid="uuid", + visibility="VISIBILITY_UNKNOWN", + ) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update_status(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.with_raw_response.update_status( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + agent = await response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update_status(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.with_streaming_response.update_status( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + agent = await response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update_status(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + await async_client.agents.with_raw_response.update_status( + path_uuid="", + ) diff --git a/tests/api_resources/test_api_keys.py b/tests/api_resources/test_api_keys.py new file mode 100644 index 00000000..fa1895c9 --- /dev/null +++ b/tests/api_resources/test_api_keys.py @@ -0,0 +1,100 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types import APIKeyListResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestAPIKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + api_key = client.api_keys.list() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + api_key = client.api_keys.list( + page=0, + per_page=0, + public_only=True, + usecases=["MODEL_USECASE_UNKNOWN"], + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.api_keys.with_raw_response.list() + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.api_keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncAPIKeys: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.list() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.list( + page=0, + per_page=0, + public_only=True, + usecases=["MODEL_USECASE_UNKNOWN"], + ) + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.with_streaming_response.list() as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyListResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index bf761cf2..e204f9fe 100644 --- a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -12,6 +12,9 @@ from gradientai.types import ( KnowledgeBaseListResponse, KnowledgeBaseCreateResponse, + KnowledgeBaseDeleteResponse, + KnowledgeBaseUpdateResponse, + KnowledgeBaseRetrieveResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -84,6 +87,104 @@ def test_streaming_response_create(self, client: GradientAI) -> None: assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + knowledge_base = client.knowledge_bases.retrieve( + "uuid", + ) + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.knowledge_bases.with_raw_response.retrieve( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.knowledge_bases.with_streaming_response.retrieve( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + assert cast(Any, 
response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.knowledge_bases.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + knowledge_base = client.knowledge_bases.update( + path_uuid="uuid", + ) + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + knowledge_base = client.knowledge_bases.update( + path_uuid="uuid", + database_id="database_id", + embedding_model_uuid="embedding_model_uuid", + name="name", + project_id="project_id", + tags=["string"], + body_uuid="uuid", + ) + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.knowledge_bases.with_raw_response.update( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.knowledge_bases.with_streaming_response.update( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> 
None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + client.knowledge_bases.with_raw_response.update( + path_uuid="", + ) + @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: @@ -121,6 +222,48 @@ def test_streaming_response_list(self, client: GradientAI) -> None: assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + knowledge_base = client.knowledge_bases.delete( + "uuid", + ) + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.knowledge_bases.with_raw_response.delete( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.knowledge_bases.with_streaming_response.delete( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = response.parse() + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + client.knowledge_bases.with_raw_response.delete( + "", + ) + class TestAsyncKnowledgeBases: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @@ -189,6 +332,104 @@ async def 
test_streaming_response_create(self, async_client: AsyncGradientAI) -> assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + knowledge_base = await async_client.knowledge_bases.retrieve( + "uuid", + ) + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.knowledge_bases.with_raw_response.retrieve( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.knowledge_bases.with_streaming_response.retrieve( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseRetrieveResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.knowledge_bases.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + knowledge_base = await async_client.knowledge_bases.update( + path_uuid="uuid", + ) + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize 
+ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + knowledge_base = await async_client.knowledge_bases.update( + path_uuid="uuid", + database_id="database_id", + embedding_model_uuid="embedding_model_uuid", + name="name", + project_id="project_id", + tags=["string"], + body_uuid="uuid", + ) + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.knowledge_bases.with_raw_response.update( + path_uuid="uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.knowledge_bases.with_streaming_response.update( + path_uuid="uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): + await async_client.knowledge_bases.with_raw_response.update( + path_uuid="", + ) + @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: @@ -225,3 +466,45 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N assert_matches_type(KnowledgeBaseListResponse, knowledge_base, 
path=["response"]) assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + knowledge_base = await async_client.knowledge_bases.delete( + "uuid", + ) + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.knowledge_bases.with_raw_response.delete( + "uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.knowledge_bases.with_streaming_response.delete( + "uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + knowledge_base = await response.parse() + assert_matches_type(KnowledgeBaseDeleteResponse, knowledge_base, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): + await async_client.knowledge_bases.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py new file mode 100644 index 00000000..64c84612 --- /dev/null +++ b/tests/api_resources/test_regions.py @@ -0,0 +1,96 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types import RegionListResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestRegions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + region = client.regions.list() + assert_matches_type(RegionListResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + region = client.regions.list( + serves_batch=True, + serves_inference=True, + ) + assert_matches_type(RegionListResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.regions.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + region = response.parse() + assert_matches_type(RegionListResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.regions.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + region = response.parse() + assert_matches_type(RegionListResponse, region, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncRegions: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + region = await 
async_client.regions.list() + assert_matches_type(RegionListResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + region = await async_client.regions.list( + serves_batch=True, + serves_inference=True, + ) + assert_matches_type(RegionListResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + region = await response.parse() + assert_matches_type(RegionListResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + region = await response.parse() + assert_matches_type(RegionListResponse, region, path=["response"]) + + assert cast(Any, response.is_closed) is True From ed595b0a23df125ffba733d7339e771997c3f149 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 16 Jun 2025 05:43:32 +0000 Subject: [PATCH 11/41] chore: update SDK settings --- .github/workflows/create-releases.yml | 38 ------------------- .github/workflows/publish-pypi.yml | 8 +++- .github/workflows/release-doctor.yml | 3 +- .stats.yml | 2 +- CONTRIBUTING.md | 4 +- README.md | 6 +-- bin/check-release-environment | 4 -- pyproject.toml | 6 +-- src/gradientai/resources/agents/agents.py | 8 ++-- src/gradientai/resources/agents/api_keys.py | 8 ++-- .../resources/agents/child_agents.py | 8 ++-- src/gradientai/resources/agents/functions.py | 8 ++-- 
.../resources/agents/knowledge_bases.py | 8 ++-- src/gradientai/resources/agents/versions.py | 8 ++-- src/gradientai/resources/api_keys/api_keys.py | 8 ++-- .../resources/api_keys/api_keys_.py | 8 ++-- .../resources/auth/agents/agents.py | 8 ++-- src/gradientai/resources/auth/agents/token.py | 8 ++-- src/gradientai/resources/auth/auth.py | 8 ++-- src/gradientai/resources/chat.py | 8 ++-- src/gradientai/resources/embeddings.py | 8 ++-- src/gradientai/resources/indexing_jobs.py | 8 ++-- .../resources/knowledge_bases/data_sources.py | 8 ++-- .../knowledge_bases/knowledge_bases.py | 8 ++-- src/gradientai/resources/models.py | 8 ++-- .../providers/anthropic/anthropic.py | 8 ++-- .../resources/providers/anthropic/keys.py | 8 ++-- .../resources/providers/openai/keys.py | 8 ++-- .../resources/providers/openai/openai.py | 8 ++-- .../resources/providers/providers.py | 8 ++-- src/gradientai/resources/regions.py | 8 ++-- 31 files changed, 108 insertions(+), 147 deletions(-) delete mode 100644 .github/workflows/create-releases.yml diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml deleted file mode 100644 index 04dac49f..00000000 --- a/.github/workflows/create-releases.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: Create releases -on: - schedule: - - cron: '0 5 * * *' # every day at 5am UTC - push: - branches: - - main - -jobs: - release: - name: release - if: github.ref == 'refs/heads/main' && github.repository == 'digitalocean/gradientai-python' - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - - - uses: stainless-api/trigger-release-please@v1 - id: release - with: - repo: ${{ github.event.repository.full_name }} - stainless-api-key: ${{ secrets.STAINLESS_API_KEY }} - - - name: Install Rye - if: ${{ steps.release.outputs.releases_created }} - run: | - curl -sSf https://rye.astral.sh/get | bash - echo "$HOME/.rye/shims" >> $GITHUB_PATH - env: - RYE_VERSION: '0.44.0' - RYE_INSTALL_OPTION: '--yes' - - - name: Publish to PyPI 
- if: ${{ steps.release.outputs.releases_created }} - run: | - bash ./bin/publish-pypi - env: - PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index bff3a970..34110cd4 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -1,9 +1,13 @@ -# workflow for re-running publishing to PyPI in case it fails for some reason -# you can run this workflow by navigating to https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml +# This workflow is triggered when a GitHub release is created. +# It can also be run manually to re-publish to PyPI in case it failed for some reason. +# You can run this workflow by navigating to https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml name: Publish PyPI on: workflow_dispatch: + release: + types: [published] + jobs: publish: name: publish diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index 94e02117..9845ae8d 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -9,7 +9,7 @@ jobs: release_doctor: name: release doctor runs-on: ubuntu-latest - if: github.repository == 'digitalocean/gradientai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') + if: github.repository == 'digitalocean/genai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') steps: - uses: actions/checkout@v4 @@ -18,5 +18,4 @@ jobs: run: | bash ./bin/check-release-environment env: - STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }} PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.stats.yml b/.stats.yml index 74cbd5c9..54f59bb8 
100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 60 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-6e449984986e066baea73af5c2726811e74a284f0d68d49926ec5c7821c7ed31.yml openapi_spec_hash: 78f43f68f46df0d81891ae2ff66bf3a0 -config_hash: 69dc66269416b2e01e8852b5a6788b97 +config_hash: 53eac5170a4d8967367b33767544a858 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 086907ef..fe7e0d7c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -62,7 +62,7 @@ If you’d like to use the repository from source, you can either install from g To install via git: ```sh -$ pip install git+ssh://git@github.com/digitalocean/gradientai-python.git +$ pip install git+ssh://git@github.com/digitalocean/genai-python.git ``` Alternatively, you can build from source and install the wheel file: @@ -120,7 +120,7 @@ the changes aren't made through the automated pipeline, you may want to make rel ### Publish with a GitHub workflow -You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. +You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. ### Publish manually diff --git a/README.md b/README.md index 10236f18..bf235be1 100644 --- a/README.md +++ b/README.md @@ -235,9 +235,9 @@ version = response.parse() # get the object that `agents.versions.list()` would print(version.agent_versions) ``` -These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) object. 
+These methods return an [`APIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/gradientai/_response.py) object. -The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. +The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. #### `.with_streaming_response` @@ -343,7 +343,7 @@ This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) con We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. -We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/gradientai-python/issues) with questions, bugs, or suggestions. +We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/genai-python/issues) with questions, bugs, or suggestions. ### Determining the installed version diff --git a/bin/check-release-environment b/bin/check-release-environment index 78967e8b..b1bd8969 100644 --- a/bin/check-release-environment +++ b/bin/check-release-environment @@ -2,10 +2,6 @@ errors=() -if [ -z "${STAINLESS_API_KEY}" ]; then - errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.") -fi - if [ -z "${PYPI_TOKEN}" ]; then errors+=("The GRADIENT_AI_PYPI_TOKEN secret has not been set. 
Please set it in either this repository's secrets or your organization secrets.") fi diff --git a/pyproject.toml b/pyproject.toml index 9c6fdd19..8f36a952 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,8 +34,8 @@ classifiers = [ ] [project.urls] -Homepage = "https://github.com/digitalocean/gradientai-python" -Repository = "https://github.com/digitalocean/gradientai-python" +Homepage = "https://github.com/digitalocean/genai-python" +Repository = "https://github.com/digitalocean/genai-python" [tool.rye] @@ -122,7 +122,7 @@ path = "README.md" [[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]] # replace relative links with absolute links pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)' -replacement = '[\1](https://github.com/digitalocean/gradientai-python/tree/main/\g<2>)' +replacement = '[\1](https://github.com/digitalocean/genai-python/tree/main/\g<2>)' [tool.pytest.ini_options] testpaths = ["tests"] diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index 87e2aeca..036abf75 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -104,7 +104,7 @@ def with_raw_response(self) -> AgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AgentsResourceWithRawResponse(self) @@ -113,7 +113,7 @@ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AgentsResourceWithStreamingResponse(self) @@ -460,7 +460,7 @@ def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAgentsResourceWithRawResponse(self) @@ -469,7 +469,7 @@ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAgentsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py index 7180503f..4470850c 100644 --- a/src/gradientai/resources/agents/api_keys.py +++ b/src/gradientai/resources/agents/api_keys.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> APIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return APIKeysResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return APIKeysResourceWithStreamingResponse(self) @@ -268,7 +268,7 @@ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAPIKeysResourceWithRawResponse(self) @@ -277,7 +277,7 @@ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAPIKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/child_agents.py b/src/gradientai/resources/agents/child_agents.py index 1f7fe3ce..163e52cf 100644 --- a/src/gradientai/resources/agents/child_agents.py +++ b/src/gradientai/resources/agents/child_agents.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> ChildAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ChildAgentsResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> ChildAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ChildAgentsResourceWithStreamingResponse(self) @@ -237,7 +237,7 @@ def with_raw_response(self) -> AsyncChildAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncChildAgentsResourceWithRawResponse(self) @@ -246,7 +246,7 @@ def with_streaming_response(self) -> AsyncChildAgentsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncChildAgentsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/functions.py b/src/gradientai/resources/agents/functions.py index 6de9b141..19c63d8c 100644 --- a/src/gradientai/resources/agents/functions.py +++ b/src/gradientai/resources/agents/functions.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> FunctionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return FunctionsResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> FunctionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return FunctionsResourceWithStreamingResponse(self) @@ -199,7 +199,7 @@ def with_raw_response(self) -> AsyncFunctionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncFunctionsResourceWithRawResponse(self) @@ -208,7 +208,7 @@ def with_streaming_response(self) -> AsyncFunctionsResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncFunctionsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/knowledge_bases.py b/src/gradientai/resources/agents/knowledge_bases.py index 97b086e0..a400c56a 100644 --- a/src/gradientai/resources/agents/knowledge_bases.py +++ b/src/gradientai/resources/agents/knowledge_bases.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return KnowledgeBasesResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return KnowledgeBasesResourceWithStreamingResponse(self) @@ -160,7 +160,7 @@ def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncKnowledgeBasesResourceWithRawResponse(self) @@ -169,7 +169,7 @@ def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingRes """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncKnowledgeBasesResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/versions.py b/src/gradientai/resources/agents/versions.py index d71da8df..e77a252b 100644 --- a/src/gradientai/resources/agents/versions.py +++ b/src/gradientai/resources/agents/versions.py @@ -29,7 +29,7 @@ def with_raw_response(self) -> VersionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return VersionsResourceWithRawResponse(self) @@ -38,7 +38,7 @@ def with_streaming_response(self) -> VersionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return VersionsResourceWithStreamingResponse(self) @@ -143,7 +143,7 @@ def with_raw_response(self) -> AsyncVersionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncVersionsResourceWithRawResponse(self) @@ -152,7 +152,7 @@ def with_streaming_response(self) -> AsyncVersionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncVersionsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/api_keys/api_keys.py b/src/gradientai/resources/api_keys/api_keys.py index aecccfc3..355cea17 100644 --- a/src/gradientai/resources/api_keys/api_keys.py +++ b/src/gradientai/resources/api_keys/api_keys.py @@ -36,7 +36,7 @@ def with_raw_response(self) -> APIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return APIKeysResourceWithRawResponse(self) @@ -45,7 +45,7 @@ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return APIKeysResourceWithStreamingResponse(self) @@ -135,7 +135,7 @@ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAPIKeysResourceWithRawResponse(self) @@ -144,7 +144,7 @@ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAPIKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/api_keys/api_keys_.py b/src/gradientai/resources/api_keys/api_keys_.py index 969bcfb9..03d70150 100644 --- a/src/gradientai/resources/api_keys/api_keys_.py +++ b/src/gradientai/resources/api_keys/api_keys_.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> APIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return APIKeysResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return APIKeysResourceWithStreamingResponse(self) @@ -242,7 +242,7 @@ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAPIKeysResourceWithRawResponse(self) @@ -251,7 +251,7 @@ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAPIKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/auth/agents/agents.py b/src/gradientai/resources/auth/agents/agents.py index 52426560..a0aa9faf 100644 --- a/src/gradientai/resources/auth/agents/agents.py +++ b/src/gradientai/resources/auth/agents/agents.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> AgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AgentsResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AgentsResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAgentsResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAgentsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/auth/agents/token.py b/src/gradientai/resources/auth/agents/token.py index 26de7c06..f39c892d 100644 --- a/src/gradientai/resources/auth/agents/token.py +++ b/src/gradientai/resources/auth/agents/token.py @@ -28,7 +28,7 @@ def with_raw_response(self) -> TokenResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return TokenResourceWithRawResponse(self) @@ -37,7 +37,7 @@ def with_streaming_response(self) -> TokenResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return TokenResourceWithStreamingResponse(self) @@ -85,7 +85,7 @@ def with_raw_response(self) -> AsyncTokenResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncTokenResourceWithRawResponse(self) @@ -94,7 +94,7 @@ def with_streaming_response(self) -> AsyncTokenResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncTokenResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/auth/auth.py b/src/gradientai/resources/auth/auth.py index 7a502a2c..985fc56c 100644 --- a/src/gradientai/resources/auth/auth.py +++ b/src/gradientai/resources/auth/auth.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> AuthResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AuthResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> AuthResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AuthResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncAuthResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAuthResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncAuthResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAuthResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/chat.py b/src/gradientai/resources/chat.py index 223e7cf3..518fbad8 100644 --- a/src/gradientai/resources/chat.py +++ b/src/gradientai/resources/chat.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> ChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ChatResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> ChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ChatResourceWithStreamingResponse(self) @@ -191,7 +191,7 @@ def with_raw_response(self) -> AsyncChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncChatResourceWithRawResponse(self) @@ -200,7 +200,7 @@ def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncChatResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/embeddings.py b/src/gradientai/resources/embeddings.py index 36ffe3c6..1bcd3145 100644 --- a/src/gradientai/resources/embeddings.py +++ b/src/gradientai/resources/embeddings.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> EmbeddingsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return EmbeddingsResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> EmbeddingsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return EmbeddingsResourceWithStreamingResponse(self) @@ -101,7 +101,7 @@ def with_raw_response(self) -> AsyncEmbeddingsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncEmbeddingsResourceWithRawResponse(self) @@ -110,7 +110,7 @@ def with_streaming_response(self) -> AsyncEmbeddingsResourceWithStreamingRespons """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncEmbeddingsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/indexing_jobs.py b/src/gradientai/resources/indexing_jobs.py index 6647d36c..d0b933e8 100644 --- a/src/gradientai/resources/indexing_jobs.py +++ b/src/gradientai/resources/indexing_jobs.py @@ -34,7 +34,7 @@ def with_raw_response(self) -> IndexingJobsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return IndexingJobsResourceWithRawResponse(self) @@ -43,7 +43,7 @@ def with_streaming_response(self) -> IndexingJobsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return IndexingJobsResourceWithStreamingResponse(self) @@ -250,7 +250,7 @@ def with_raw_response(self) -> AsyncIndexingJobsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncIndexingJobsResourceWithRawResponse(self) @@ -259,7 +259,7 @@ def with_streaming_response(self) -> AsyncIndexingJobsResourceWithStreamingRespo """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncIndexingJobsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/gradientai/resources/knowledge_bases/data_sources.py index a1d2c575..68714895 100644 --- a/src/gradientai/resources/knowledge_bases/data_sources.py +++ b/src/gradientai/resources/knowledge_bases/data_sources.py @@ -35,7 +35,7 @@ def with_raw_response(self) -> DataSourcesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return DataSourcesResourceWithRawResponse(self) @@ -44,7 +44,7 @@ def with_streaming_response(self) -> DataSourcesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return DataSourcesResourceWithStreamingResponse(self) @@ -196,7 +196,7 @@ def with_raw_response(self) -> AsyncDataSourcesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncDataSourcesResourceWithRawResponse(self) @@ -205,7 +205,7 @@ def with_streaming_response(self) -> AsyncDataSourcesResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncDataSourcesResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py index 7d4f38e3..f73ab08c 100644 --- a/src/gradientai/resources/knowledge_bases/knowledge_bases.py +++ b/src/gradientai/resources/knowledge_bases/knowledge_bases.py @@ -46,7 +46,7 @@ def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return KnowledgeBasesResourceWithRawResponse(self) @@ -55,7 +55,7 @@ def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return KnowledgeBasesResourceWithStreamingResponse(self) @@ -312,7 +312,7 @@ def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncKnowledgeBasesResourceWithRawResponse(self) @@ -321,7 +321,7 @@ def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingRes """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncKnowledgeBasesResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/models.py b/src/gradientai/resources/models.py index c30e1135..81b75441 100644 --- a/src/gradientai/resources/models.py +++ b/src/gradientai/resources/models.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> ModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ModelsResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ModelsResourceWithStreamingResponse(self) @@ -104,7 +104,7 @@ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncModelsResourceWithRawResponse(self) @@ -113,7 +113,7 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncModelsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/anthropic/anthropic.py b/src/gradientai/resources/providers/anthropic/anthropic.py index 23a914e9..64783563 100644 --- a/src/gradientai/resources/providers/anthropic/anthropic.py +++ b/src/gradientai/resources/providers/anthropic/anthropic.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> AnthropicResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AnthropicResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AnthropicResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncAnthropicResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncAnthropicResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/anthropic/keys.py b/src/gradientai/resources/providers/anthropic/keys.py index 8fbb64db..1b11fc99 100644 --- a/src/gradientai/resources/providers/anthropic/keys.py +++ b/src/gradientai/resources/providers/anthropic/keys.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> KeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return KeysResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> KeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return KeysResourceWithStreamingResponse(self) @@ -303,7 +303,7 @@ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncKeysResourceWithRawResponse(self) @@ -312,7 +312,7 @@ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/openai/keys.py b/src/gradientai/resources/providers/openai/keys.py index 44ac8508..abcb22f0 100644 --- a/src/gradientai/resources/providers/openai/keys.py +++ b/src/gradientai/resources/providers/openai/keys.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> KeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return KeysResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> KeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return KeysResourceWithStreamingResponse(self) @@ -301,7 +301,7 @@ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncKeysResourceWithRawResponse(self) @@ -310,7 +310,7 @@ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/openai/openai.py b/src/gradientai/resources/providers/openai/openai.py index b02dc2e1..d29fd062 100644 --- a/src/gradientai/resources/providers/openai/openai.py +++ b/src/gradientai/resources/providers/openai/openai.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> OpenAIResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return OpenAIResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return OpenAIResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncOpenAIResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncOpenAIResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/providers.py b/src/gradientai/resources/providers/providers.py index ef942f73..50e3db1a 100644 --- a/src/gradientai/resources/providers/providers.py +++ b/src/gradientai/resources/providers/providers.py @@ -39,7 +39,7 @@ def with_raw_response(self) -> ProvidersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return ProvidersResourceWithRawResponse(self) @@ -48,7 +48,7 @@ def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return ProvidersResourceWithStreamingResponse(self) @@ -68,7 +68,7 @@ def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncProvidersResourceWithRawResponse(self) @@ -77,7 +77,7 @@ def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncProvidersResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/regions.py b/src/gradientai/resources/regions.py index ad760c24..bbf07c3e 100644 --- a/src/gradientai/resources/regions.py +++ b/src/gradientai/resources/regions.py @@ -28,7 +28,7 @@ def with_raw_response(self) -> RegionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return RegionsResourceWithRawResponse(self) @@ -37,7 +37,7 @@ def with_streaming_response(self) -> RegionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return RegionsResourceWithStreamingResponse(self) @@ -95,7 +95,7 @@ def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ return AsyncRegionsResourceWithRawResponse(self) @@ -104,7 +104,7 @@ def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ return AsyncRegionsResourceWithStreamingResponse(self) From 7ecf66c58a124c153a32055967beacbd1a3bbcf3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Jun 2025 02:46:26 +0000 Subject: [PATCH 12/41] chore(tests): add tests for httpx client instantiation & proxies --- tests/test_client.py | 46 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/tests/test_client.py b/tests/test_client.py index 59eee2ff..4cf52324 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -30,6 +30,8 @@ DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, + DefaultHttpxClient, + DefaultAsyncHttpxClient, make_request_options, ) @@ -826,6 +828,28 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert response.http_request.headers.get("x-stainless-retry-count") == "42" + def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None: + # Test that the proxy environment variables are set correctly + monkeypatch.setenv("HTTPS_PROXY", "https://example.org") + + client = DefaultHttpxClient() + + mounts = tuple(client._mounts.items()) + assert len(mounts) == 1 + assert mounts[0][0].pattern == "https://" + + @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning") + def test_default_client_creation(self) -> None: + # Ensure that the client can be initialized without any exceptions + DefaultHttpxClient( + verify=True, + cert=None, + trust_env=True, + http1=True, + http2=False, + limits=httpx.Limits(max_connections=100, max_keepalive_connections=20), + ) + @pytest.mark.respx(base_url=base_url) def test_follow_redirects(self, respx_mock: MockRouter) -> None: # Test that the default follow_redirects=True allows following 
redirects @@ -1679,6 +1703,28 @@ async def test_main() -> None: time.sleep(0.1) + async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None: + # Test that the proxy environment variables are set correctly + monkeypatch.setenv("HTTPS_PROXY", "https://example.org") + + client = DefaultAsyncHttpxClient() + + mounts = tuple(client._mounts.items()) + assert len(mounts) == 1 + assert mounts[0][0].pattern == "https://" + + @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning") + async def test_default_client_creation(self) -> None: + # Ensure that the client can be initialized without any exceptions + DefaultAsyncHttpxClient( + verify=True, + cert=None, + trust_env=True, + http1=True, + http2=False, + limits=httpx.Limits(max_connections=100, max_keepalive_connections=20), + ) + @pytest.mark.respx(base_url=base_url) async def test_follow_redirects(self, respx_mock: MockRouter) -> None: # Test that the default follow_redirects=True allows following redirects From 12e210389204ff74f504e1ec3aa5ba99f1b4971c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Jun 2025 04:16:01 +0000 Subject: [PATCH 13/41] chore(internal): update conftest.py --- tests/conftest.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index 04c66a33..8432d29e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,3 +1,5 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ from __future__ import annotations import os From b6b3f9ea85918cfc6fc7304b2d21c340d82a0083 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Jun 2025 06:46:58 +0000 Subject: [PATCH 14/41] chore(ci): enable for pull requests --- .github/workflows/ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 88980c93..08bd7a02 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,6 +7,10 @@ on: - 'integrated/**' - 'stl-preview-head/**' - 'stl-preview-base/**' + pull_request: + branches-ignore: + - 'stl-preview-head/**' + - 'stl-preview-base/**' jobs: lint: From 5ebec81604a206eba5e75a7e8990bd7711ba8f47 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Jun 2025 12:12:44 +0000 Subject: [PATCH 15/41] feat(api): update via SDK Studio --- .stats.yml | 4 +- api.md | 10 +- src/gradientai/_client.py | 38 -- src/gradientai/resources/__init__.py | 14 - src/gradientai/resources/chat.py | 381 ------------------ src/gradientai/types/__init__.py | 6 - ...request_message_content_part_text_param.py | 15 - .../types/chat_completion_token_logprob.py | 57 --- .../types/chat_create_completion_params.py | 208 ---------- .../types/chat_create_completion_response.py | 81 ---- tests/api_resources/test_chat.py | 184 --------- 11 files changed, 3 insertions(+), 995 deletions(-) delete mode 100644 src/gradientai/resources/chat.py delete mode 100644 src/gradientai/types/chat_completion_request_message_content_part_text_param.py delete mode 100644 src/gradientai/types/chat_completion_token_logprob.py delete mode 100644 src/gradientai/types/chat_create_completion_params.py delete mode 100644 src/gradientai/types/chat_create_completion_response.py delete mode 100644 tests/api_resources/test_chat.py diff --git a/.stats.yml b/.stats.yml index 54f59bb8..297debd9 100644 --- 
a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 60 +configured_endpoints: 59 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-6e449984986e066baea73af5c2726811e74a284f0d68d49926ec5c7821c7ed31.yml openapi_spec_hash: 78f43f68f46df0d81891ae2ff66bf3a0 -config_hash: 53eac5170a4d8967367b33767544a858 +config_hash: 3e04a2c7a4b0b9b16bd2956a3208b942 diff --git a/api.md b/api.md index a3d3e8c1..6a543de5 100644 --- a/api.md +++ b/api.md @@ -306,17 +306,9 @@ Methods: Types: ```python -from gradientai.types import ( - ChatCompletionRequestMessageContentPartText, - ChatCompletionTokenLogprob, - ChatCreateCompletionResponse, -) +from gradientai.types import ChatCompletionRequestMessageContentPartText, ChatCompletionTokenLogprob ``` -Methods: - -- client.chat.create_completion(\*\*params) -> ChatCreateCompletionResponse - # Embeddings Types: diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 5c0172c1..9bf55fd7 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -33,7 +33,6 @@ if TYPE_CHECKING: from .resources import ( auth, - chat, agents, models, regions, @@ -43,7 +42,6 @@ indexing_jobs, knowledge_bases, ) - from .resources.chat import ChatResource, AsyncChatResource from .resources.models import ModelsResource, AsyncModelsResource from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.auth.auth import AuthResource, AsyncAuthResource @@ -163,12 +161,6 @@ def api_keys(self) -> APIKeysResource: return APIKeysResource(self) - @cached_property - def chat(self) -> ChatResource: - from .resources.chat import ChatResource - - return ChatResource(self) - @cached_property def embeddings(self) -> EmbeddingsResource: from .resources.embeddings import EmbeddingsResource @@ -391,12 +383,6 @@ def api_keys(self) -> AsyncAPIKeysResource: return AsyncAPIKeysResource(self) - @cached_property - def chat(self) -> AsyncChatResource: - from 
.resources.chat import AsyncChatResource - - return AsyncChatResource(self) - @cached_property def embeddings(self) -> AsyncEmbeddingsResource: from .resources.embeddings import AsyncEmbeddingsResource @@ -570,12 +556,6 @@ def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: return APIKeysResourceWithRawResponse(self._client.api_keys) - @cached_property - def chat(self) -> chat.ChatResourceWithRawResponse: - from .resources.chat import ChatResourceWithRawResponse - - return ChatResourceWithRawResponse(self._client.chat) - @cached_property def embeddings(self) -> embeddings.EmbeddingsResourceWithRawResponse: from .resources.embeddings import EmbeddingsResourceWithRawResponse @@ -637,12 +617,6 @@ def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse: return AsyncAPIKeysResourceWithRawResponse(self._client.api_keys) - @cached_property - def chat(self) -> chat.AsyncChatResourceWithRawResponse: - from .resources.chat import AsyncChatResourceWithRawResponse - - return AsyncChatResourceWithRawResponse(self._client.chat) - @cached_property def embeddings(self) -> embeddings.AsyncEmbeddingsResourceWithRawResponse: from .resources.embeddings import AsyncEmbeddingsResourceWithRawResponse @@ -704,12 +678,6 @@ def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: return APIKeysResourceWithStreamingResponse(self._client.api_keys) - @cached_property - def chat(self) -> chat.ChatResourceWithStreamingResponse: - from .resources.chat import ChatResourceWithStreamingResponse - - return ChatResourceWithStreamingResponse(self._client.chat) - @cached_property def embeddings(self) -> embeddings.EmbeddingsResourceWithStreamingResponse: from .resources.embeddings import EmbeddingsResourceWithStreamingResponse @@ -771,12 +739,6 @@ def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: return AsyncAPIKeysResourceWithStreamingResponse(self._client.api_keys) - @cached_property - def chat(self) -> 
chat.AsyncChatResourceWithStreamingResponse: - from .resources.chat import AsyncChatResourceWithStreamingResponse - - return AsyncChatResourceWithStreamingResponse(self._client.chat) - @cached_property def embeddings(self) -> embeddings.AsyncEmbeddingsResourceWithStreamingResponse: from .resources.embeddings import AsyncEmbeddingsResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 6dcbff02..05417215 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -8,14 +8,6 @@ AuthResourceWithStreamingResponse, AsyncAuthResourceWithStreamingResponse, ) -from .chat import ( - ChatResource, - AsyncChatResource, - ChatResourceWithRawResponse, - AsyncChatResourceWithRawResponse, - ChatResourceWithStreamingResponse, - AsyncChatResourceWithStreamingResponse, -) from .agents import ( AgentsResource, AsyncAgentsResource, @@ -124,12 +116,6 @@ "AsyncAPIKeysResourceWithRawResponse", "APIKeysResourceWithStreamingResponse", "AsyncAPIKeysResourceWithStreamingResponse", - "ChatResource", - "AsyncChatResource", - "ChatResourceWithRawResponse", - "AsyncChatResourceWithRawResponse", - "ChatResourceWithStreamingResponse", - "AsyncChatResourceWithStreamingResponse", "EmbeddingsResource", "AsyncEmbeddingsResource", "EmbeddingsResourceWithRawResponse", diff --git a/src/gradientai/resources/chat.py b/src/gradientai/resources/chat.py deleted file mode 100644 index 518fbad8..00000000 --- a/src/gradientai/resources/chat.py +++ /dev/null @@ -1,381 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Dict, List, Union, Iterable, Optional - -import httpx - -from ..types import chat_create_completion_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.chat_create_completion_response import ChatCreateCompletionResponse - -__all__ = ["ChatResource", "AsyncChatResource"] - - -class ChatResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ChatResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return ChatResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ChatResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return ChatResourceWithStreamingResponse(self) - - def create_completion( - self, - *, - messages: Iterable[chat_create_completion_params.Message], - model: str, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[chat_create_completion_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChatCreateCompletionResponse: - """ - Creates a model response for the given chat conversation. - - Args: - messages: A list of messages comprising the conversation so far. - - model: Model ID used to generate the response. - - frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their - existing frequency in the text so far, decreasing the model's likelihood to - repeat the same line verbatim. 
- - logit_bias: Modify the likelihood of specified tokens appearing in the completion. - - Accepts a JSON object that maps tokens (specified by their token ID in the - tokenizer) to an associated bias value from -100 to 100. Mathematically, the - bias is added to the logits generated by the model prior to sampling. The exact - effect will vary per model, but values between -1 and 1 should decrease or - increase likelihood of selection; values like -100 or 100 should result in a ban - or exclusive selection of the relevant token. - - logprobs: Whether to return log probabilities of the output tokens or not. If true, - returns the log probabilities of each output token returned in the `content` of - `message`. - - max_completion_tokens: The maximum number of completion tokens that may be used over the course of the - run. The run will make a best effort to use only the number of completion tokens - specified, across multiple turns of the run. - - max_tokens: The maximum number of tokens that can be generated in the completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's - context length. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - n: How many chat completion choices to generate for each input message. Note that - you will be charged based on the number of generated tokens across all of the - choices. Keep `n` as `1` to minimize costs. - - presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on - whether they appear in the text so far, increasing the model's likelihood to - talk about new topics. - - stop: Up to 4 sequences where the API will stop generating further tokens. 
The - returned text will not contain the stop sequence. - - stream: If set to true, the model response data will be streamed to the client as it is - generated using server-sent events. - - stream_options: Options for streaming response. Only set this when you set `stream: true`. - - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. We generally recommend altering this or `top_p` but - not both. - - top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to - return at each token position, each with an associated log probability. - `logprobs` must be set to `true` if this parameter is used. - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - - user: A unique identifier representing your end-user, which can help DigitalOcean to - monitor and detect abuse. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/chat/completions", - body=maybe_transform( - { - "messages": messages, - "model": model, - "frequency_penalty": frequency_penalty, - "logit_bias": logit_bias, - "logprobs": logprobs, - "max_completion_tokens": max_completion_tokens, - "max_tokens": max_tokens, - "metadata": metadata, - "n": n, - "presence_penalty": presence_penalty, - "stop": stop, - "stream": stream, - "stream_options": stream_options, - "temperature": temperature, - "top_logprobs": top_logprobs, - "top_p": top_p, - "user": user, - }, - chat_create_completion_params.ChatCreateCompletionParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChatCreateCompletionResponse, - ) - - -class AsyncChatResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncChatResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncChatResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncChatResourceWithStreamingResponse(self) - - async def create_completion( - self, - *, - messages: Iterable[chat_create_completion_params.Message], - model: str, - frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, - logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, - logprobs: Optional[bool] | NotGiven = NOT_GIVEN, - max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, - max_tokens: Optional[int] | NotGiven = NOT_GIVEN, - metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, - stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - stream: Optional[bool] | NotGiven = NOT_GIVEN, - stream_options: Optional[chat_create_completion_params.StreamOptions] | NotGiven = NOT_GIVEN, - temperature: Optional[float] | NotGiven = NOT_GIVEN, - top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, - top_p: Optional[float] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ChatCreateCompletionResponse: - """ - Creates a model response for the given chat conversation. - - Args: - messages: A list of messages comprising the conversation so far. - - model: Model ID used to generate the response. - - frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their - existing frequency in the text so far, decreasing the model's likelihood to - repeat the same line verbatim. 
- - logit_bias: Modify the likelihood of specified tokens appearing in the completion. - - Accepts a JSON object that maps tokens (specified by their token ID in the - tokenizer) to an associated bias value from -100 to 100. Mathematically, the - bias is added to the logits generated by the model prior to sampling. The exact - effect will vary per model, but values between -1 and 1 should decrease or - increase likelihood of selection; values like -100 or 100 should result in a ban - or exclusive selection of the relevant token. - - logprobs: Whether to return log probabilities of the output tokens or not. If true, - returns the log probabilities of each output token returned in the `content` of - `message`. - - max_completion_tokens: The maximum number of completion tokens that may be used over the course of the - run. The run will make a best effort to use only the number of completion tokens - specified, across multiple turns of the run. - - max_tokens: The maximum number of tokens that can be generated in the completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's - context length. - - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful - for storing additional information about the object in a structured format, and - querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - - n: How many chat completion choices to generate for each input message. Note that - you will be charged based on the number of generated tokens across all of the - choices. Keep `n` as `1` to minimize costs. - - presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on - whether they appear in the text so far, increasing the model's likelihood to - talk about new topics. - - stop: Up to 4 sequences where the API will stop generating further tokens. 
The - returned text will not contain the stop sequence. - - stream: If set to true, the model response data will be streamed to the client as it is - generated using server-sent events. - - stream_options: Options for streaming response. Only set this when you set `stream: true`. - - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will - make the output more random, while lower values like 0.2 will make it more - focused and deterministic. We generally recommend altering this or `top_p` but - not both. - - top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to - return at each token position, each with an associated log probability. - `logprobs` must be set to `true` if this parameter is used. - - top_p: An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - - user: A unique identifier representing your end-user, which can help DigitalOcean to - monitor and detect abuse. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/chat/completions", - body=await async_maybe_transform( - { - "messages": messages, - "model": model, - "frequency_penalty": frequency_penalty, - "logit_bias": logit_bias, - "logprobs": logprobs, - "max_completion_tokens": max_completion_tokens, - "max_tokens": max_tokens, - "metadata": metadata, - "n": n, - "presence_penalty": presence_penalty, - "stop": stop, - "stream": stream, - "stream_options": stream_options, - "temperature": temperature, - "top_logprobs": top_logprobs, - "top_p": top_p, - "user": user, - }, - chat_create_completion_params.ChatCreateCompletionParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ChatCreateCompletionResponse, - ) - - -class ChatResourceWithRawResponse: - def __init__(self, chat: ChatResource) -> None: - self._chat = chat - - self.create_completion = to_raw_response_wrapper( - chat.create_completion, - ) - - -class AsyncChatResourceWithRawResponse: - def __init__(self, chat: AsyncChatResource) -> None: - self._chat = chat - - self.create_completion = async_to_raw_response_wrapper( - chat.create_completion, - ) - - -class ChatResourceWithStreamingResponse: - def __init__(self, chat: ChatResource) -> None: - self._chat = chat - - self.create_completion = to_streamed_response_wrapper( - chat.create_completion, - ) - - -class AsyncChatResourceWithStreamingResponse: - def __init__(self, chat: AsyncChatResource) -> None: - self._chat = chat - - self.create_completion = async_to_streamed_response_wrapper( - chat.create_completion, - ) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index ee516f83..6992d67b 100644 --- 
a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -39,19 +39,13 @@ from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams -from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob -from .chat_create_completion_params import ChatCreateCompletionParams as ChatCreateCompletionParams from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse -from .chat_create_completion_response import ChatCreateCompletionResponse as ChatCreateCompletionResponse from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse from .indexing_job_retrieve_data_sources_response import ( IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, ) -from .chat_completion_request_message_content_part_text_param import ( - ChatCompletionRequestMessageContentPartTextParam as ChatCompletionRequestMessageContentPartTextParam, -) diff --git a/src/gradientai/types/chat_completion_request_message_content_part_text_param.py b/src/gradientai/types/chat_completion_request_message_content_part_text_param.py deleted file mode 100644 index 4aec9488..00000000 --- 
a/src/gradientai/types/chat_completion_request_message_content_part_text_param.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["ChatCompletionRequestMessageContentPartTextParam"] - - -class ChatCompletionRequestMessageContentPartTextParam(TypedDict, total=False): - text: Required[str] - """The text content.""" - - type: Required[Literal["text"]] - """The type of the content part.""" diff --git a/src/gradientai/types/chat_completion_token_logprob.py b/src/gradientai/types/chat_completion_token_logprob.py deleted file mode 100644 index 78de1dfa..00000000 --- a/src/gradientai/types/chat_completion_token_logprob.py +++ /dev/null @@ -1,57 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from .._models import BaseModel - -__all__ = ["ChatCompletionTokenLogprob", "TopLogprob"] - - -class TopLogprob(BaseModel): - token: str - """The token.""" - - bytes: Optional[List[int]] = None - """A list of integers representing the UTF-8 bytes representation of the token. - - Useful in instances where characters are represented by multiple tokens and - their byte representations must be combined to generate the correct text - representation. Can be `null` if there is no bytes representation for the token. - """ - - logprob: float - """The log probability of this token, if it is within the top 20 most likely - tokens. - - Otherwise, the value `-9999.0` is used to signify that the token is very - unlikely. - """ - - -class ChatCompletionTokenLogprob(BaseModel): - token: str - """The token.""" - - bytes: Optional[List[int]] = None - """A list of integers representing the UTF-8 bytes representation of the token. 
- - Useful in instances where characters are represented by multiple tokens and - their byte representations must be combined to generate the correct text - representation. Can be `null` if there is no bytes representation for the token. - """ - - logprob: float - """The log probability of this token, if it is within the top 20 most likely - tokens. - - Otherwise, the value `-9999.0` is used to signify that the token is very - unlikely. - """ - - top_logprobs: List[TopLogprob] - """List of the most likely tokens and their log probability, at this token - position. - - In rare cases, there may be fewer than the number of requested `top_logprobs` - returned. - """ diff --git a/src/gradientai/types/chat_create_completion_params.py b/src/gradientai/types/chat_create_completion_params.py deleted file mode 100644 index 05c427b1..00000000 --- a/src/gradientai/types/chat_create_completion_params.py +++ /dev/null @@ -1,208 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Dict, List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from .chat_completion_request_message_content_part_text_param import ChatCompletionRequestMessageContentPartTextParam - -__all__ = [ - "ChatCreateCompletionParams", - "Message", - "MessageChatCompletionRequestSystemMessage", - "MessageChatCompletionRequestDeveloperMessage", - "MessageChatCompletionRequestUserMessage", - "MessageChatCompletionRequestAssistantMessage", - "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart", - "MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal", - "StreamOptions", -] - - -class ChatCreateCompletionParams(TypedDict, total=False): - messages: Required[Iterable[Message]] - """A list of messages comprising the conversation so far.""" - - model: Required[str] - """Model ID used to generate the response.""" - - frequency_penalty: Optional[float] - """Number between -2.0 and 2.0. - - Positive values penalize new tokens based on their existing frequency in the - text so far, decreasing the model's likelihood to repeat the same line verbatim. - """ - - logit_bias: Optional[Dict[str, int]] - """Modify the likelihood of specified tokens appearing in the completion. - - Accepts a JSON object that maps tokens (specified by their token ID in the - tokenizer) to an associated bias value from -100 to 100. Mathematically, the - bias is added to the logits generated by the model prior to sampling. The exact - effect will vary per model, but values between -1 and 1 should decrease or - increase likelihood of selection; values like -100 or 100 should result in a ban - or exclusive selection of the relevant token. - """ - - logprobs: Optional[bool] - """Whether to return log probabilities of the output tokens or not. 
- - If true, returns the log probabilities of each output token returned in the - `content` of `message`. - """ - - max_completion_tokens: Optional[int] - """ - The maximum number of completion tokens that may be used over the course of the - run. The run will make a best effort to use only the number of completion tokens - specified, across multiple turns of the run. - """ - - max_tokens: Optional[int] - """The maximum number of tokens that can be generated in the completion. - - The token count of your prompt plus `max_tokens` cannot exceed the model's - context length. - """ - - metadata: Optional[Dict[str, str]] - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. - """ - - n: Optional[int] - """How many chat completion choices to generate for each input message. - - Note that you will be charged based on the number of generated tokens across all - of the choices. Keep `n` as `1` to minimize costs. - """ - - presence_penalty: Optional[float] - """Number between -2.0 and 2.0. - - Positive values penalize new tokens based on whether they appear in the text so - far, increasing the model's likelihood to talk about new topics. - """ - - stop: Union[Optional[str], List[str], None] - """Up to 4 sequences where the API will stop generating further tokens. - - The returned text will not contain the stop sequence. - """ - - stream: Optional[bool] - """ - If set to true, the model response data will be streamed to the client as it is - generated using server-sent events. - """ - - stream_options: Optional[StreamOptions] - """Options for streaming response. Only set this when you set `stream: true`.""" - - temperature: Optional[float] - """What sampling temperature to use, between 0 and 2. 
- - Higher values like 0.8 will make the output more random, while lower values like - 0.2 will make it more focused and deterministic. We generally recommend altering - this or `top_p` but not both. - """ - - top_logprobs: Optional[int] - """ - An integer between 0 and 20 specifying the number of most likely tokens to - return at each token position, each with an associated log probability. - `logprobs` must be set to `true` if this parameter is used. - """ - - top_p: Optional[float] - """ - An alternative to sampling with temperature, called nucleus sampling, where the - model considers the results of the tokens with top_p probability mass. So 0.1 - means only the tokens comprising the top 10% probability mass are considered. - - We generally recommend altering this or `temperature` but not both. - """ - - user: str - """ - A unique identifier representing your end-user, which can help DigitalOcean to - monitor and detect abuse. - """ - - -class MessageChatCompletionRequestSystemMessage(TypedDict, total=False): - content: Required[Union[str, Iterable[ChatCompletionRequestMessageContentPartTextParam]]] - """The contents of the system message.""" - - role: Required[Literal["system"]] - """The role of the messages author, in this case `system`.""" - - -class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False): - content: Required[Union[str, Iterable[ChatCompletionRequestMessageContentPartTextParam]]] - """The contents of the developer message.""" - - role: Required[Literal["developer"]] - """The role of the messages author, in this case `developer`.""" - - -class MessageChatCompletionRequestUserMessage(TypedDict, total=False): - content: Required[Union[str, Iterable[ChatCompletionRequestMessageContentPartTextParam]]] - """The contents of the user message.""" - - role: Required[Literal["user"]] - """The role of the messages author, in this case `user`.""" - - -class 
MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal( - TypedDict, total=False -): - refusal: Required[str] - """The refusal message generated by the model.""" - - type: Required[Literal["refusal"]] - """The type of the content part.""" - - -MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart: TypeAlias = Union[ - ChatCompletionRequestMessageContentPartTextParam, - MessageChatCompletionRequestAssistantMessageContentArrayOfContentPartChatCompletionRequestMessageContentPartRefusal, -] - - -class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): - role: Required[Literal["assistant"]] - """The role of the messages author, in this case `assistant`.""" - - content: Union[str, Iterable[MessageChatCompletionRequestAssistantMessageContentArrayOfContentPart], None] - """The contents of the assistant message.""" - - refusal: Optional[str] - """The refusal message by the assistant.""" - - -Message: TypeAlias = Union[ - MessageChatCompletionRequestSystemMessage, - MessageChatCompletionRequestDeveloperMessage, - MessageChatCompletionRequestUserMessage, - MessageChatCompletionRequestAssistantMessage, -] - - -class StreamOptions(TypedDict, total=False): - include_usage: bool - """If set, an additional chunk will be streamed before the `data: [DONE]` message. - - The `usage` field on this chunk shows the token usage statistics for the entire - request, and the `choices` field will always be an empty array. - - All other chunks will also include a `usage` field, but with a null value. - **NOTE:** If the stream is interrupted, you may not receive the final usage - chunk which contains the total token usage for the request. 
- """ diff --git a/src/gradientai/types/chat_create_completion_response.py b/src/gradientai/types/chat_create_completion_response.py deleted file mode 100644 index e1f20038..00000000 --- a/src/gradientai/types/chat_create_completion_response.py +++ /dev/null @@ -1,81 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from typing_extensions import Literal - -from .._models import BaseModel -from .chat_completion_token_logprob import ChatCompletionTokenLogprob - -__all__ = ["ChatCreateCompletionResponse", "Choice", "ChoiceLogprobs", "ChoiceMessage", "Usage"] - - -class ChoiceLogprobs(BaseModel): - content: Optional[List[ChatCompletionTokenLogprob]] = None - """A list of message content tokens with log probability information.""" - - refusal: Optional[List[ChatCompletionTokenLogprob]] = None - """A list of message refusal tokens with log probability information.""" - - -class ChoiceMessage(BaseModel): - content: Optional[str] = None - """The contents of the message.""" - - refusal: Optional[str] = None - """The refusal message generated by the model.""" - - role: Literal["assistant"] - """The role of the author of this message.""" - - -class Choice(BaseModel): - finish_reason: Literal["stop", "length"] - """The reason the model stopped generating tokens. - - This will be `stop` if the model hit a natural stop point or a provided stop - sequence, or `length` if the maximum number of tokens specified in the request - was reached. 
- """ - - index: int - """The index of the choice in the list of choices.""" - - logprobs: Optional[ChoiceLogprobs] = None - """Log probability information for the choice.""" - - message: ChoiceMessage - """A chat completion message generated by the model.""" - - -class Usage(BaseModel): - completion_tokens: int - """Number of tokens in the generated completion.""" - - prompt_tokens: int - """Number of tokens in the prompt.""" - - total_tokens: int - """Total number of tokens used in the request (prompt + completion).""" - - -class ChatCreateCompletionResponse(BaseModel): - id: str - """A unique identifier for the chat completion.""" - - choices: List[Choice] - """A list of chat completion choices. - - Can be more than one if `n` is greater than 1. - """ - - created: int - """The Unix timestamp (in seconds) of when the chat completion was created.""" - - model: str - """The model used for the chat completion.""" - - object: Literal["chat.completion"] - """The object type, which is always `chat.completion`.""" - - usage: Optional[Usage] = None - """Usage statistics for the completion request.""" diff --git a/tests/api_resources/test_chat.py b/tests/api_resources/test_chat.py deleted file mode 100644 index 2c5bcbd8..00000000 --- a/tests/api_resources/test_chat.py +++ /dev/null @@ -1,184 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types import ChatCreateCompletionResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestChat: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_completion(self, client: GradientAI) -> None: - chat = client.chat.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - ) - assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_completion_with_all_params(self, client: GradientAI) -> None: - chat = client.chat.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - frequency_penalty=-2, - logit_bias={"foo": 0}, - logprobs=True, - max_completion_tokens=256, - max_tokens=0, - metadata={"foo": "string"}, - n=1, - presence_penalty=-2, - stop="\n", - stream=True, - stream_options={"include_usage": True}, - temperature=1, - top_logprobs=0, - top_p=1, - user="user-1234", - ) - assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_completion(self, client: GradientAI) -> None: - response = client.chat.with_raw_response.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - chat = response.parse() - assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) - - @pytest.mark.skip() - @parametrize - def 
test_streaming_response_create_completion(self, client: GradientAI) -> None: - with client.chat.with_streaming_response.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - chat = response.parse() - assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncChat: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_completion(self, async_client: AsyncGradientAI) -> None: - chat = await async_client.chat.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - ) - assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_completion_with_all_params(self, async_client: AsyncGradientAI) -> None: - chat = await async_client.chat.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - frequency_penalty=-2, - logit_bias={"foo": 0}, - logprobs=True, - max_completion_tokens=256, - max_tokens=0, - metadata={"foo": "string"}, - n=1, - presence_penalty=-2, - stop="\n", - stream=True, - stream_options={"include_usage": True}, - temperature=1, - top_logprobs=0, - top_p=1, - user="user-1234", - ) - assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_completion(self, async_client: AsyncGradientAI) -> None: - response = await async_client.chat.with_raw_response.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", 
- ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - chat = await response.parse() - assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create_completion(self, async_client: AsyncGradientAI) -> None: - async with async_client.chat.with_streaming_response.create_completion( - messages=[ - { - "content": "string", - "role": "system", - } - ], - model="llama3-8b-instruct", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - chat = await response.parse() - assert_matches_type(ChatCreateCompletionResponse, chat, path=["response"]) - - assert cast(Any, response.is_closed) is True From c04249614917198b1eb2324438605d99b719a1cf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 17 Jun 2025 13:26:35 +0000 Subject: [PATCH 16/41] feat(api): update via SDK Studio --- .stats.yml | 4 +- api.md | 2 +- src/gradientai/resources/models.py | 125 ++++++++++++++++++-- src/gradientai/types/__init__.py | 1 + src/gradientai/types/model_list_params.py | 42 +++++++ src/gradientai/types/model_list_response.py | 39 +++++- tests/api_resources/test_models.py | 22 ++++ 7 files changed, 218 insertions(+), 17 deletions(-) create mode 100644 src/gradientai/types/model_list_params.py diff --git a/.stats.yml b/.stats.yml index 297debd9..d42f6e6a 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 59 +configured_endpoints: 58 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-6e449984986e066baea73af5c2726811e74a284f0d68d49926ec5c7821c7ed31.yml openapi_spec_hash: 78f43f68f46df0d81891ae2ff66bf3a0 -config_hash: 3e04a2c7a4b0b9b16bd2956a3208b942 +config_hash: 84ba29fbded3618d3cc3994639c82547 diff --git a/api.md 
b/api.md index 6a543de5..840a2d5f 100644 --- a/api.md +++ b/api.md @@ -332,4 +332,4 @@ from gradientai.types import Model, ModelListResponse Methods: - client.models.retrieve(model) -> Model -- client.models.list() -> ModelListResponse +- client.models.list(\*\*params) -> ModelListResponse diff --git a/src/gradientai/resources/models.py b/src/gradientai/resources/models.py index 81b75441..b0df90ad 100644 --- a/src/gradientai/resources/models.py +++ b/src/gradientai/resources/models.py @@ -2,9 +2,14 @@ from __future__ import annotations +from typing import List +from typing_extensions import Literal + import httpx +from ..types import model_list_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import ( @@ -77,6 +82,21 @@ def retrieve( def list( self, *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + public_only: bool | NotGiven = NOT_GIVEN, + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -85,13 +105,50 @@ def list( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ModelListResponse: """ - Lists the currently available models, and provides basic information about each - one such as the owner and availability. + To list all models, send a GET request to `/v2/gen-ai/models`. + + Args: + page: page number. + + per_page: items per page. 
+ + public_only: only include models that are publicly available. + + usecases: include only models defined for the listed usecases. + + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model maybe used in an agent + - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds """ return self._get( - "/models", + "/v2/gen-ai/models", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + "public_only": public_only, + "usecases": usecases, + }, + model_list_params.ModelListParams, + ), ), cast_to=ModelListResponse, ) @@ -154,6 +211,21 @@ async def retrieve( async def list( self, *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + public_only: bool | NotGiven = NOT_GIVEN, + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -162,13 +234,50 @@ async def list( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ModelListResponse: """ - Lists the currently available models, and provides basic information about each - one such as the owner and availability. + To list all models, send a GET request to `/v2/gen-ai/models`. + + Args: + page: page number. + + per_page: items per page. + + public_only: only include models that are publicly available. + + usecases: include only models defined for the listed usecases. + + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model maybe used in an agent + - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds """ return await self._get( - "/models", + "/v2/gen-ai/models", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + "public_only": public_only, + "usecases": usecases, + }, + model_list_params.ModelListParams, + ), ), cast_to=ModelListResponse, ) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index 6992d67b..a389ecab 
100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -9,6 +9,7 @@ from .api_indexing_job import APIIndexingJob as APIIndexingJob from .agent_list_params import AgentListParams as AgentListParams from .api_model_version import APIModelVersion as APIModelVersion +from .model_list_params import ModelListParams as ModelListParams from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase from .region_list_params import RegionListParams as RegionListParams from .agent_create_params import AgentCreateParams as AgentCreateParams diff --git a/src/gradientai/types/model_list_params.py b/src/gradientai/types/model_list_params.py new file mode 100644 index 00000000..4abc1dc1 --- /dev/null +++ b/src/gradientai/types/model_list_params.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, TypedDict + +__all__ = ["ModelListParams"] + + +class ModelListParams(TypedDict, total=False): + page: int + """page number.""" + + per_page: int + """items per page.""" + + public_only: bool + """only include models that are publicly available.""" + + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + """include only models defined for the listed usecases. 
+ + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model maybe used in an agent + - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + """ diff --git a/src/gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py index 8f835449..1d0e5eee 100644 --- a/src/gradientai/types/model_list_response.py +++ b/src/gradientai/types/model_list_response.py @@ -1,15 +1,42 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List -from typing_extensions import Literal +from typing import List, Optional +from datetime import datetime -from .model import Model from .._models import BaseModel +from .api_agreement import APIAgreement +from .agents.api_meta import APIMeta +from .agents.api_links import APILinks +from .api_model_version import APIModelVersion -__all__ = ["ModelListResponse"] +__all__ = ["ModelListResponse", "Model"] + + +class Model(BaseModel): + agreement: Optional[APIAgreement] = None + + created_at: Optional[datetime] = None + + is_foundational: Optional[bool] = None + + name: Optional[str] = None + + parent_uuid: Optional[str] = None + + updated_at: Optional[datetime] = None + + upload_complete: Optional[bool] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + version: Optional[APIModelVersion] = None class ModelListResponse(BaseModel): - data: List[Model] + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None - object: Literal["list"] + models: Optional[List[Model]] = None diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index b9559c8e..04133ed4 100644 --- 
a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -65,6 +65,17 @@ def test_method_list(self, client: GradientAI) -> None: model = client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + model = client.models.list( + page=0, + per_page=0, + public_only=True, + usecases=["MODEL_USECASE_UNKNOWN"], + ) + assert_matches_type(ModelListResponse, model, path=["response"]) + @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: @@ -139,6 +150,17 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: model = await async_client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + model = await async_client.models.list( + page=0, + per_page=0, + public_only=True, + usecases=["MODEL_USECASE_UNKNOWN"], + ) + assert_matches_type(ModelListResponse, model, path=["response"]) + @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: From 6e40dc3fa4e33082be7b0bbf65d07e9ae9ac6370 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 02:18:31 +0000 Subject: [PATCH 17/41] chore(readme): update badges --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index bf235be1..465a7a0c 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Gradient AI Python API library -[![PyPI version](https://img.shields.io/pypi/v/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python.svg)](https://pypi.org/project/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python/) +[![PyPI version]()](https://pypi.org/project/c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python/) The 
Gradient AI Python library provides convenient access to the Gradient AI REST API from any Python 3.8+ application. The library includes type definitions for all request params and response fields, From e649dcb0f9416e9bf568cc9f3480d7e222052391 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 05:55:01 +0000 Subject: [PATCH 18/41] fix(tests): fix: tests which call HTTP endpoints directly with the example parameters --- tests/test_client.py | 39 ++++++++++++--------------------------- 1 file changed, 12 insertions(+), 27 deletions(-) diff --git a/tests/test_client.py b/tests/test_client.py index 4cf52324..d83082e3 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -24,7 +24,6 @@ from gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError from gradientai._types import Omit from gradientai._models import BaseModel, FinalRequestOptions -from gradientai._constants import RAW_RESPONSE_HEADER from gradientai._exceptions import APIStatusError, APITimeoutError, GradientAIError, APIResponseValidationError from gradientai._base_client import ( DEFAULT_TIMEOUT, @@ -721,30 +720,21 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - self.client.get( - "/v2/gen-ai/agents/uuid/versions", - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) + client.agents.versions.with_streaming_response.list(uuid="uuid").__enter__() assert 
_get_open_connections(self.client) == 0 @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - self.client.get( - "/v2/gen-ai/agents/uuid/versions", - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) - + client.agents.versions.with_streaming_response.list(uuid="uuid").__enter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @@ -1548,30 +1538,25 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + async def test_retrying_timeout_errors_doesnt_leak( + self, respx_mock: MockRouter, async_client: AsyncGradientAI + ) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - await self.client.get( - "/v2/gen-ai/agents/uuid/versions", - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) + await async_client.agents.versions.with_streaming_response.list(uuid="uuid").__aenter__() assert _get_open_connections(self.client) == 0 @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) - async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: + async def test_retrying_status_errors_doesnt_leak( + 
self, respx_mock: MockRouter, async_client: AsyncGradientAI + ) -> None: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - await self.client.get( - "/v2/gen-ai/agents/uuid/versions", - cast_to=httpx.Response, - options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, - ) - + await async_client.agents.versions.with_streaming_response.list(uuid="uuid").__aenter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) From 5f700dc7a4e757015d3bd6f2e82a311114b82d77 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 11:32:43 +0000 Subject: [PATCH 19/41] feat(api): update via SDK Studio --- .stats.yml | 8 +- api.md | 30 +-- src/gradientai/_client.py | 96 +-------- src/gradientai/resources/__init__.py | 28 --- src/gradientai/resources/agents/agents.py | 48 +++-- src/gradientai/resources/agents/api_keys.py | 40 +++- .../resources/agents/child_agents.py | 32 ++- src/gradientai/resources/agents/functions.py | 24 ++- .../resources/agents/knowledge_bases.py | 24 ++- src/gradientai/resources/agents/versions.py | 16 +- src/gradientai/resources/api_keys/api_keys.py | 8 +- .../resources/api_keys/api_keys_.py | 40 +++- src/gradientai/resources/auth/__init__.py | 33 --- .../resources/auth/agents/__init__.py | 33 --- .../resources/auth/agents/agents.py | 102 --------- src/gradientai/resources/auth/agents/token.py | 173 --------------- src/gradientai/resources/auth/auth.py | 102 --------- src/gradientai/resources/embeddings.py | 201 ------------------ src/gradientai/resources/indexing_jobs.py | 40 +++- .../resources/knowledge_bases/data_sources.py | 24 ++- .../knowledge_bases/knowledge_bases.py | 40 +++- src/gradientai/resources/models.py | 16 +- .../resources/providers/anthropic/keys.py | 48 +++-- .../resources/providers/openai/keys.py | 48 +++-- 
src/gradientai/resources/regions.py | 8 +- src/gradientai/types/__init__.py | 2 - src/gradientai/types/api_agent.py | 2 + src/gradientai/types/auth/agents/__init__.py | 3 - .../types/auth/agents/token_create_params.py | 13 -- .../auth/agents/token_create_response.py | 13 -- .../types/embedding_create_params.py | 28 --- .../types/embedding_create_response.py | 41 ---- .../types/knowledge_base_create_params.py | 16 +- .../api_knowledge_base_data_source.py | 12 +- tests/api_resources/auth/__init__.py | 1 - tests/api_resources/auth/agents/__init__.py | 1 - tests/api_resources/auth/agents/test_token.py | 124 ----------- tests/api_resources/test_embeddings.py | 116 ---------- tests/api_resources/test_knowledge_bases.py | 14 ++ 39 files changed, 398 insertions(+), 1250 deletions(-) delete mode 100644 src/gradientai/resources/auth/__init__.py delete mode 100644 src/gradientai/resources/auth/agents/__init__.py delete mode 100644 src/gradientai/resources/auth/agents/agents.py delete mode 100644 src/gradientai/resources/auth/agents/token.py delete mode 100644 src/gradientai/resources/auth/auth.py delete mode 100644 src/gradientai/resources/embeddings.py delete mode 100644 src/gradientai/types/auth/agents/token_create_params.py delete mode 100644 src/gradientai/types/auth/agents/token_create_response.py delete mode 100644 src/gradientai/types/embedding_create_params.py delete mode 100644 src/gradientai/types/embedding_create_response.py delete mode 100644 tests/api_resources/auth/__init__.py delete mode 100644 tests/api_resources/auth/agents/__init__.py delete mode 100644 tests/api_resources/auth/agents/test_token.py delete mode 100644 tests/api_resources/test_embeddings.py diff --git a/.stats.yml b/.stats.yml index d42f6e6a..9eb9eab4 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 58 -openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-6e449984986e066baea73af5c2726811e74a284f0d68d49926ec5c7821c7ed31.yml -openapi_spec_hash: 78f43f68f46df0d81891ae2ff66bf3a0 -config_hash: 84ba29fbded3618d3cc3994639c82547 +configured_endpoints: 56 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml +openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 +config_hash: cfd2d18e8dfe7223b15ce9b204cef29e diff --git a/api.md b/api.md index 840a2d5f..278862d0 100644 --- a/api.md +++ b/api.md @@ -170,22 +170,6 @@ Methods: - client.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse - client.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse -# Auth - -## Agents - -### Token - -Types: - -```python -from gradientai.types.auth.agents import TokenCreateResponse -``` - -Methods: - -- client.auth.agents.token.create(path_agent_uuid, \*\*params) -> TokenCreateResponse - # Regions Types: @@ -306,21 +290,9 @@ Methods: Types: ```python -from gradientai.types import ChatCompletionRequestMessageContentPartText, ChatCompletionTokenLogprob -``` - -# Embeddings - -Types: - -```python -from gradientai.types import EmbeddingCreateResponse +from gradientai.types import ChatCompletionTokenLogprob ``` -Methods: - -- client.embeddings.create(\*\*params) -> EmbeddingCreateResponse - # Models Types: diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 9bf55fd7..e050112e 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -31,21 +31,9 @@ ) if TYPE_CHECKING: - from .resources import ( - auth, - agents, - models, - regions, - api_keys, - providers, - embeddings, - indexing_jobs, - knowledge_bases, - ) + from .resources import agents, models, regions, api_keys, providers, indexing_jobs, knowledge_bases from .resources.models import 
ModelsResource, AsyncModelsResource from .resources.regions import RegionsResource, AsyncRegionsResource - from .resources.auth.auth import AuthResource, AsyncAuthResource - from .resources.embeddings import EmbeddingsResource, AsyncEmbeddingsResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource from .resources.api_keys.api_keys import APIKeysResource, AsyncAPIKeysResource @@ -105,6 +93,7 @@ def __init__( if base_url is None: base_url = os.environ.get("GRADIENT_AI_BASE_URL") + self._base_url_overridden = base_url is not None if base_url is None: base_url = f"https://api.digitalocean.com/" @@ -131,12 +120,6 @@ def providers(self) -> ProvidersResource: return ProvidersResource(self) - @cached_property - def auth(self) -> AuthResource: - from .resources.auth import AuthResource - - return AuthResource(self) - @cached_property def regions(self) -> RegionsResource: from .resources.regions import RegionsResource @@ -161,12 +144,6 @@ def api_keys(self) -> APIKeysResource: return APIKeysResource(self) - @cached_property - def embeddings(self) -> EmbeddingsResource: - from .resources.embeddings import EmbeddingsResource - - return EmbeddingsResource(self) - @cached_property def models(self) -> ModelsResource: from .resources.models import ModelsResource @@ -237,7 +214,7 @@ def copy( params = set_default_query http_client = http_client or self._client - return self.__class__( + client = self.__class__( api_key=api_key or self.api_key, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, @@ -247,6 +224,8 @@ def copy( default_query=params, **_extra_kwargs, ) + client._base_url_overridden = self._base_url_overridden or base_url is not None + return client # Alias for `copy` for nicer inline usage, e.g. # client.with_options(timeout=10).foo.create(...) 
@@ -327,6 +306,7 @@ def __init__( if base_url is None: base_url = os.environ.get("GRADIENT_AI_BASE_URL") + self._base_url_overridden = base_url is not None if base_url is None: base_url = f"https://api.digitalocean.com/" @@ -353,12 +333,6 @@ def providers(self) -> AsyncProvidersResource: return AsyncProvidersResource(self) - @cached_property - def auth(self) -> AsyncAuthResource: - from .resources.auth import AsyncAuthResource - - return AsyncAuthResource(self) - @cached_property def regions(self) -> AsyncRegionsResource: from .resources.regions import AsyncRegionsResource @@ -383,12 +357,6 @@ def api_keys(self) -> AsyncAPIKeysResource: return AsyncAPIKeysResource(self) - @cached_property - def embeddings(self) -> AsyncEmbeddingsResource: - from .resources.embeddings import AsyncEmbeddingsResource - - return AsyncEmbeddingsResource(self) - @cached_property def models(self) -> AsyncModelsResource: from .resources.models import AsyncModelsResource @@ -459,7 +427,7 @@ def copy( params = set_default_query http_client = http_client or self._client - return self.__class__( + client = self.__class__( api_key=api_key or self.api_key, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, @@ -469,6 +437,8 @@ def copy( default_query=params, **_extra_kwargs, ) + client._base_url_overridden = self._base_url_overridden or base_url is not None + return client # Alias for `copy` for nicer inline usage, e.g. # client.with_options(timeout=10).foo.create(...) 
@@ -526,12 +496,6 @@ def providers(self) -> providers.ProvidersResourceWithRawResponse: return ProvidersResourceWithRawResponse(self._client.providers) - @cached_property - def auth(self) -> auth.AuthResourceWithRawResponse: - from .resources.auth import AuthResourceWithRawResponse - - return AuthResourceWithRawResponse(self._client.auth) - @cached_property def regions(self) -> regions.RegionsResourceWithRawResponse: from .resources.regions import RegionsResourceWithRawResponse @@ -556,12 +520,6 @@ def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: return APIKeysResourceWithRawResponse(self._client.api_keys) - @cached_property - def embeddings(self) -> embeddings.EmbeddingsResourceWithRawResponse: - from .resources.embeddings import EmbeddingsResourceWithRawResponse - - return EmbeddingsResourceWithRawResponse(self._client.embeddings) - @cached_property def models(self) -> models.ModelsResourceWithRawResponse: from .resources.models import ModelsResourceWithRawResponse @@ -587,12 +545,6 @@ def providers(self) -> providers.AsyncProvidersResourceWithRawResponse: return AsyncProvidersResourceWithRawResponse(self._client.providers) - @cached_property - def auth(self) -> auth.AsyncAuthResourceWithRawResponse: - from .resources.auth import AsyncAuthResourceWithRawResponse - - return AsyncAuthResourceWithRawResponse(self._client.auth) - @cached_property def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: from .resources.regions import AsyncRegionsResourceWithRawResponse @@ -617,12 +569,6 @@ def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse: return AsyncAPIKeysResourceWithRawResponse(self._client.api_keys) - @cached_property - def embeddings(self) -> embeddings.AsyncEmbeddingsResourceWithRawResponse: - from .resources.embeddings import AsyncEmbeddingsResourceWithRawResponse - - return AsyncEmbeddingsResourceWithRawResponse(self._client.embeddings) - @cached_property def models(self) -> models.AsyncModelsResourceWithRawResponse: 
from .resources.models import AsyncModelsResourceWithRawResponse @@ -648,12 +594,6 @@ def providers(self) -> providers.ProvidersResourceWithStreamingResponse: return ProvidersResourceWithStreamingResponse(self._client.providers) - @cached_property - def auth(self) -> auth.AuthResourceWithStreamingResponse: - from .resources.auth import AuthResourceWithStreamingResponse - - return AuthResourceWithStreamingResponse(self._client.auth) - @cached_property def regions(self) -> regions.RegionsResourceWithStreamingResponse: from .resources.regions import RegionsResourceWithStreamingResponse @@ -678,12 +618,6 @@ def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: return APIKeysResourceWithStreamingResponse(self._client.api_keys) - @cached_property - def embeddings(self) -> embeddings.EmbeddingsResourceWithStreamingResponse: - from .resources.embeddings import EmbeddingsResourceWithStreamingResponse - - return EmbeddingsResourceWithStreamingResponse(self._client.embeddings) - @cached_property def models(self) -> models.ModelsResourceWithStreamingResponse: from .resources.models import ModelsResourceWithStreamingResponse @@ -709,12 +643,6 @@ def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse: return AsyncProvidersResourceWithStreamingResponse(self._client.providers) - @cached_property - def auth(self) -> auth.AsyncAuthResourceWithStreamingResponse: - from .resources.auth import AsyncAuthResourceWithStreamingResponse - - return AsyncAuthResourceWithStreamingResponse(self._client.auth) - @cached_property def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: from .resources.regions import AsyncRegionsResourceWithStreamingResponse @@ -739,12 +667,6 @@ def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: return AsyncAPIKeysResourceWithStreamingResponse(self._client.api_keys) - @cached_property - def embeddings(self) -> embeddings.AsyncEmbeddingsResourceWithStreamingResponse: - from 
.resources.embeddings import AsyncEmbeddingsResourceWithStreamingResponse - - return AsyncEmbeddingsResourceWithStreamingResponse(self._client.embeddings) - @cached_property def models(self) -> models.AsyncModelsResourceWithStreamingResponse: from .resources.models import AsyncModelsResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 05417215..82f79bc7 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -1,13 +1,5 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from .auth import ( - AuthResource, - AsyncAuthResource, - AuthResourceWithRawResponse, - AsyncAuthResourceWithRawResponse, - AuthResourceWithStreamingResponse, - AsyncAuthResourceWithStreamingResponse, -) from .agents import ( AgentsResource, AsyncAgentsResource, @@ -48,14 +40,6 @@ ProvidersResourceWithStreamingResponse, AsyncProvidersResourceWithStreamingResponse, ) -from .embeddings import ( - EmbeddingsResource, - AsyncEmbeddingsResource, - EmbeddingsResourceWithRawResponse, - AsyncEmbeddingsResourceWithRawResponse, - EmbeddingsResourceWithStreamingResponse, - AsyncEmbeddingsResourceWithStreamingResponse, -) from .indexing_jobs import ( IndexingJobsResource, AsyncIndexingJobsResource, @@ -86,12 +70,6 @@ "AsyncProvidersResourceWithRawResponse", "ProvidersResourceWithStreamingResponse", "AsyncProvidersResourceWithStreamingResponse", - "AuthResource", - "AsyncAuthResource", - "AuthResourceWithRawResponse", - "AsyncAuthResourceWithRawResponse", - "AuthResourceWithStreamingResponse", - "AsyncAuthResourceWithStreamingResponse", "RegionsResource", "AsyncRegionsResource", "RegionsResourceWithRawResponse", @@ -116,12 +94,6 @@ "AsyncAPIKeysResourceWithRawResponse", "APIKeysResourceWithStreamingResponse", "AsyncAPIKeysResourceWithStreamingResponse", - "EmbeddingsResource", - "AsyncEmbeddingsResource", - "EmbeddingsResourceWithRawResponse", - 
"AsyncEmbeddingsResourceWithRawResponse", - "EmbeddingsResourceWithStreamingResponse", - "AsyncEmbeddingsResourceWithStreamingResponse", "ModelsResource", "AsyncModelsResource", "ModelsResourceWithRawResponse", diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index 036abf75..78439d33 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -159,7 +159,9 @@ def create( timeout: Override the client-level default timeout for this request, in seconds """ return self._post( - "/v2/gen-ai/agents", + "/v2/gen-ai/agents" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/agents", body=maybe_transform( { "anthropic_key_uuid": anthropic_key_uuid, @@ -209,7 +211,9 @@ def retrieve( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return self._get( - f"/v2/gen-ai/agents/{uuid}", + f"/v2/gen-ai/agents/{uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -278,7 +282,9 @@ def update( if not path_uuid: raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") return self._put( - f"/v2/gen-ai/agents/{path_uuid}", + f"/v2/gen-ai/agents/{path_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}", body=maybe_transform( { "anthropic_key_uuid": anthropic_key_uuid, @@ -337,7 +343,9 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ return self._get( - "/v2/gen-ai/agents", + "/v2/gen-ai/agents" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/agents", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -381,7 +389,9 @@ def 
delete( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return self._delete( - f"/v2/gen-ai/agents/{uuid}", + f"/v2/gen-ai/agents/{uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -418,7 +428,9 @@ def update_status( if not path_uuid: raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") return self._put( - f"/v2/gen-ai/agents/{path_uuid}/deployment_visibility", + f"/v2/gen-ai/agents/{path_uuid}/deployment_visibility" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}/deployment_visibility", body=maybe_transform( { "body_uuid": body_uuid, @@ -515,7 +527,9 @@ async def create( timeout: Override the client-level default timeout for this request, in seconds """ return await self._post( - "/v2/gen-ai/agents", + "/v2/gen-ai/agents" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/agents", body=await async_maybe_transform( { "anthropic_key_uuid": anthropic_key_uuid, @@ -565,7 +579,9 @@ async def retrieve( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return await self._get( - f"/v2/gen-ai/agents/{uuid}", + f"/v2/gen-ai/agents/{uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -634,7 +650,9 @@ async def update( if not path_uuid: raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") return await self._put( - f"/v2/gen-ai/agents/{path_uuid}", + f"/v2/gen-ai/agents/{path_uuid}" + if self._client._base_url_overridden + else 
f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}", body=await async_maybe_transform( { "anthropic_key_uuid": anthropic_key_uuid, @@ -693,7 +711,9 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ return await self._get( - "/v2/gen-ai/agents", + "/v2/gen-ai/agents" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/agents", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -737,7 +757,9 @@ async def delete( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return await self._delete( - f"/v2/gen-ai/agents/{uuid}", + f"/v2/gen-ai/agents/{uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -774,7 +796,9 @@ async def update_status( if not path_uuid: raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") return await self._put( - f"/v2/gen-ai/agents/{path_uuid}/deployment_visibility", + f"/v2/gen-ai/agents/{path_uuid}/deployment_visibility" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}/deployment_visibility", body=await async_maybe_transform( { "body_uuid": body_uuid, diff --git a/src/gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py index 4470850c..155e3adc 100644 --- a/src/gradientai/resources/agents/api_keys.py +++ b/src/gradientai/resources/agents/api_keys.py @@ -74,7 +74,9 @@ def create( if not path_agent_uuid: raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") return self._post( - f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys", + f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys" + if self._client._base_url_overridden + else 
f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/api_keys", body=maybe_transform( { "body_agent_uuid": body_agent_uuid, @@ -121,7 +123,9 @@ def update( if not path_api_key_uuid: raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") return self._put( - f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}", + f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}", body=maybe_transform( { "body_agent_uuid": body_agent_uuid, @@ -169,7 +173,9 @@ def list( if not agent_uuid: raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") return self._get( - f"/v2/gen-ai/agents/{agent_uuid}/api_keys", + f"/v2/gen-ai/agents/{agent_uuid}/api_keys" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -216,7 +222,9 @@ def delete( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return self._delete( - f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}", + f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -253,7 +261,9 @@ def regenerate( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return self._put( - f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate", + f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate" + if 
self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -310,7 +320,9 @@ async def create( if not path_agent_uuid: raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") return await self._post( - f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys", + f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/api_keys", body=await async_maybe_transform( { "body_agent_uuid": body_agent_uuid, @@ -357,7 +369,9 @@ async def update( if not path_api_key_uuid: raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") return await self._put( - f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}", + f"/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/api_keys/{path_api_key_uuid}", body=await async_maybe_transform( { "body_agent_uuid": body_agent_uuid, @@ -405,7 +419,9 @@ async def list( if not agent_uuid: raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") return await self._get( - f"/v2/gen-ai/agents/{agent_uuid}/api_keys", + f"/v2/gen-ai/agents/{agent_uuid}/api_keys" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -452,7 +468,9 @@ async def delete( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return await self._delete( - 
f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}", + f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -489,7 +507,9 @@ async def regenerate( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return await self._put( - f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate", + f"/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}/regenerate", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), diff --git a/src/gradientai/resources/agents/child_agents.py b/src/gradientai/resources/agents/child_agents.py index 163e52cf..9031d8ce 100644 --- a/src/gradientai/resources/agents/child_agents.py +++ b/src/gradientai/resources/agents/child_agents.py @@ -85,7 +85,9 @@ def update( f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" ) return self._put( - f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", + f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", body=maybe_transform( { "body_child_agent_uuid": body_child_agent_uuid, @@ -132,7 +134,9 @@ def delete( if not child_agent_uuid: raise ValueError(f"Expected a non-empty value for `child_agent_uuid` but received {child_agent_uuid!r}") return self._delete( - 
f"/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}", + f"/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -179,7 +183,9 @@ def add( f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" ) return self._post( - f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", + f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", body=maybe_transform( { "body_child_agent_uuid": body_child_agent_uuid, @@ -222,7 +228,9 @@ def view( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return self._get( - f"/v2/gen-ai/agents/{uuid}/child_agents", + f"/v2/gen-ai/agents/{uuid}/child_agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}/child_agents", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -291,7 +299,9 @@ async def update( f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" ) return await self._put( - f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", + f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", body=await async_maybe_transform( { "body_child_agent_uuid": 
body_child_agent_uuid, @@ -338,7 +348,9 @@ async def delete( if not child_agent_uuid: raise ValueError(f"Expected a non-empty value for `child_agent_uuid` but received {child_agent_uuid!r}") return await self._delete( - f"/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}", + f"/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -385,7 +397,9 @@ async def add( f"Expected a non-empty value for `path_child_agent_uuid` but received {path_child_agent_uuid!r}" ) return await self._post( - f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", + f"/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_parent_agent_uuid}/child_agents/{path_child_agent_uuid}", body=await async_maybe_transform( { "body_child_agent_uuid": body_child_agent_uuid, @@ -428,7 +442,9 @@ async def view( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return await self._get( - f"/v2/gen-ai/agents/{uuid}/child_agents", + f"/v2/gen-ai/agents/{uuid}/child_agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}/child_agents", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), diff --git a/src/gradientai/resources/agents/functions.py b/src/gradientai/resources/agents/functions.py index 19c63d8c..67a811cc 100644 --- a/src/gradientai/resources/agents/functions.py +++ b/src/gradientai/resources/agents/functions.py @@ -77,7 +77,9 @@ def create( if not path_agent_uuid: raise 
ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") return self._post( - f"/v2/gen-ai/agents/{path_agent_uuid}/functions", + f"/v2/gen-ai/agents/{path_agent_uuid}/functions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/functions", body=maybe_transform( { "body_agent_uuid": body_agent_uuid, @@ -134,7 +136,9 @@ def update( if not path_function_uuid: raise ValueError(f"Expected a non-empty value for `path_function_uuid` but received {path_function_uuid!r}") return self._put( - f"/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}", + f"/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}", body=maybe_transform( { "body_agent_uuid": body_agent_uuid, @@ -184,7 +188,9 @@ def delete( if not function_uuid: raise ValueError(f"Expected a non-empty value for `function_uuid` but received {function_uuid!r}") return self._delete( - f"/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}", + f"/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -246,7 +252,9 @@ async def create( if not path_agent_uuid: raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") return await self._post( - f"/v2/gen-ai/agents/{path_agent_uuid}/functions", + f"/v2/gen-ai/agents/{path_agent_uuid}/functions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/functions", body=await async_maybe_transform( { "body_agent_uuid": body_agent_uuid, @@ -303,7 
+311,9 @@ async def update( if not path_function_uuid: raise ValueError(f"Expected a non-empty value for `path_function_uuid` but received {path_function_uuid!r}") return await self._put( - f"/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}", + f"/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_agent_uuid}/functions/{path_function_uuid}", body=await async_maybe_transform( { "body_agent_uuid": body_agent_uuid, @@ -353,7 +363,9 @@ async def delete( if not function_uuid: raise ValueError(f"Expected a non-empty value for `function_uuid` but received {function_uuid!r}") return await self._delete( - f"/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}", + f"/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), diff --git a/src/gradientai/resources/agents/knowledge_bases.py b/src/gradientai/resources/agents/knowledge_bases.py index a400c56a..3b9b0cd2 100644 --- a/src/gradientai/resources/agents/knowledge_bases.py +++ b/src/gradientai/resources/agents/knowledge_bases.py @@ -67,7 +67,9 @@ def attach( if not agent_uuid: raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") return self._post( - f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases", + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -106,7 +108,9 @@ def attach_single( f"Expected a non-empty value for `knowledge_base_uuid` but 
received {knowledge_base_uuid!r}" ) return self._post( - f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -145,7 +149,9 @@ def detach( f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" ) return self._delete( - f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -200,7 +206,9 @@ async def attach( if not agent_uuid: raise ValueError(f"Expected a non-empty value for `agent_uuid` but received {agent_uuid!r}") return await self._post( - f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases", + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -239,7 +247,9 @@ async def attach_single( f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" ) return await self._post( - f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}" + if self._client._base_url_overridden + else 
f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -278,7 +288,9 @@ async def detach( f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" ) return await self._delete( - f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", + f"/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{agent_uuid}/knowledge_bases/{knowledge_base_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), diff --git a/src/gradientai/resources/agents/versions.py b/src/gradientai/resources/agents/versions.py index e77a252b..86dbf99f 100644 --- a/src/gradientai/resources/agents/versions.py +++ b/src/gradientai/resources/agents/versions.py @@ -71,7 +71,9 @@ def update( if not path_uuid: raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") return self._put( - f"/v2/gen-ai/agents/{path_uuid}/versions", + f"/v2/gen-ai/agents/{path_uuid}/versions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}/versions", body=maybe_transform( { "body_uuid": body_uuid, @@ -118,7 +120,9 @@ def list( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return self._get( - f"/v2/gen-ai/agents/{uuid}/versions", + f"/v2/gen-ai/agents/{uuid}/versions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}/versions", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -185,7 +189,9 @@ async def update( if not path_uuid: raise ValueError(f"Expected a non-empty value for `path_uuid` but 
received {path_uuid!r}") return await self._put( - f"/v2/gen-ai/agents/{path_uuid}/versions", + f"/v2/gen-ai/agents/{path_uuid}/versions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{path_uuid}/versions", body=await async_maybe_transform( { "body_uuid": body_uuid, @@ -232,7 +238,9 @@ async def list( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return await self._get( - f"/v2/gen-ai/agents/{uuid}/versions", + f"/v2/gen-ai/agents/{uuid}/versions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/agents/{uuid}/versions", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, diff --git a/src/gradientai/resources/api_keys/api_keys.py b/src/gradientai/resources/api_keys/api_keys.py index 355cea17..ee94a02d 100644 --- a/src/gradientai/resources/api_keys/api_keys.py +++ b/src/gradientai/resources/api_keys/api_keys.py @@ -104,7 +104,9 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ return self._get( - "/v2/gen-ai/models", + "/v2/gen-ai/models" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/models", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -203,7 +205,9 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ return await self._get( - "/v2/gen-ai/models", + "/v2/gen-ai/models" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/models", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, diff --git a/src/gradientai/resources/api_keys/api_keys_.py b/src/gradientai/resources/api_keys/api_keys_.py index 03d70150..7bea219b 100644 --- a/src/gradientai/resources/api_keys/api_keys_.py +++ b/src/gradientai/resources/api_keys/api_keys_.py @@ -69,7 +69,9 @@ def create( timeout: 
Override the client-level default timeout for this request, in seconds """ return self._post( - "/v2/gen-ai/models/api_keys", + "/v2/gen-ai/models/api_keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/models/api_keys", body=maybe_transform({"name": name}, api_key_create_params.APIKeyCreateParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout @@ -106,7 +108,9 @@ def update( if not path_api_key_uuid: raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") return self._put( - f"/v2/gen-ai/models/api_keys/{path_api_key_uuid}", + f"/v2/gen-ai/models/api_keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{path_api_key_uuid}", body=maybe_transform( { "body_api_key_uuid": body_api_key_uuid, @@ -149,7 +153,9 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ return self._get( - "/v2/gen-ai/models/api_keys", + "/v2/gen-ai/models/api_keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/models/api_keys", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -193,7 +199,9 @@ def delete( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return self._delete( - f"/v2/gen-ai/models/api_keys/{api_key_uuid}", + f"/v2/gen-ai/models/api_keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -227,7 +235,9 @@ def update_regenerate( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") 
return self._put( - f"/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate", + f"/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -279,7 +289,9 @@ async def create( timeout: Override the client-level default timeout for this request, in seconds """ return await self._post( - "/v2/gen-ai/models/api_keys", + "/v2/gen-ai/models/api_keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/models/api_keys", body=await async_maybe_transform({"name": name}, api_key_create_params.APIKeyCreateParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout @@ -316,7 +328,9 @@ async def update( if not path_api_key_uuid: raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") return await self._put( - f"/v2/gen-ai/models/api_keys/{path_api_key_uuid}", + f"/v2/gen-ai/models/api_keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{path_api_key_uuid}", body=await async_maybe_transform( { "body_api_key_uuid": body_api_key_uuid, @@ -359,7 +373,9 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ return await self._get( - "/v2/gen-ai/models/api_keys", + "/v2/gen-ai/models/api_keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/models/api_keys", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -403,7 +419,9 @@ async def delete( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return await self._delete( 
- f"/v2/gen-ai/models/api_keys/{api_key_uuid}", + f"/v2/gen-ai/models/api_keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -437,7 +455,9 @@ async def update_regenerate( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return await self._put( - f"/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate", + f"/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/models/api_keys/{api_key_uuid}/regenerate", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), diff --git a/src/gradientai/resources/auth/__init__.py b/src/gradientai/resources/auth/__init__.py deleted file mode 100644 index 7c844a98..00000000 --- a/src/gradientai/resources/auth/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .auth import ( - AuthResource, - AsyncAuthResource, - AuthResourceWithRawResponse, - AsyncAuthResourceWithRawResponse, - AuthResourceWithStreamingResponse, - AsyncAuthResourceWithStreamingResponse, -) -from .agents import ( - AgentsResource, - AsyncAgentsResource, - AgentsResourceWithRawResponse, - AsyncAgentsResourceWithRawResponse, - AgentsResourceWithStreamingResponse, - AsyncAgentsResourceWithStreamingResponse, -) - -__all__ = [ - "AgentsResource", - "AsyncAgentsResource", - "AgentsResourceWithRawResponse", - "AsyncAgentsResourceWithRawResponse", - "AgentsResourceWithStreamingResponse", - "AsyncAgentsResourceWithStreamingResponse", - "AuthResource", - "AsyncAuthResource", - "AuthResourceWithRawResponse", - "AsyncAuthResourceWithRawResponse", - "AuthResourceWithStreamingResponse", - "AsyncAuthResourceWithStreamingResponse", -] diff --git a/src/gradientai/resources/auth/agents/__init__.py b/src/gradientai/resources/auth/agents/__init__.py deleted file mode 100644 index 2972198f..00000000 --- a/src/gradientai/resources/auth/agents/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .token import ( - TokenResource, - AsyncTokenResource, - TokenResourceWithRawResponse, - AsyncTokenResourceWithRawResponse, - TokenResourceWithStreamingResponse, - AsyncTokenResourceWithStreamingResponse, -) -from .agents import ( - AgentsResource, - AsyncAgentsResource, - AgentsResourceWithRawResponse, - AsyncAgentsResourceWithRawResponse, - AgentsResourceWithStreamingResponse, - AsyncAgentsResourceWithStreamingResponse, -) - -__all__ = [ - "TokenResource", - "AsyncTokenResource", - "TokenResourceWithRawResponse", - "AsyncTokenResourceWithRawResponse", - "TokenResourceWithStreamingResponse", - "AsyncTokenResourceWithStreamingResponse", - "AgentsResource", - "AsyncAgentsResource", - "AgentsResourceWithRawResponse", - "AsyncAgentsResourceWithRawResponse", - "AgentsResourceWithStreamingResponse", - "AsyncAgentsResourceWithStreamingResponse", -] diff --git a/src/gradientai/resources/auth/agents/agents.py b/src/gradientai/resources/auth/agents/agents.py deleted file mode 100644 index a0aa9faf..00000000 --- a/src/gradientai/resources/auth/agents/agents.py +++ /dev/null @@ -1,102 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .token import ( - TokenResource, - AsyncTokenResource, - TokenResourceWithRawResponse, - AsyncTokenResourceWithRawResponse, - TokenResourceWithStreamingResponse, - AsyncTokenResourceWithStreamingResponse, -) -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource - -__all__ = ["AgentsResource", "AsyncAgentsResource"] - - -class AgentsResource(SyncAPIResource): - @cached_property - def token(self) -> TokenResource: - return TokenResource(self._client) - - @cached_property - def with_raw_response(self) -> AgentsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AgentsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AgentsResourceWithStreamingResponse(self) - - -class AsyncAgentsResource(AsyncAPIResource): - @cached_property - def token(self) -> AsyncTokenResource: - return AsyncTokenResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAgentsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAgentsResourceWithStreamingResponse(self) - - -class AgentsResourceWithRawResponse: - def __init__(self, agents: AgentsResource) -> None: - self._agents = agents - - @cached_property - def token(self) -> TokenResourceWithRawResponse: - return TokenResourceWithRawResponse(self._agents.token) - - -class AsyncAgentsResourceWithRawResponse: - def __init__(self, agents: AsyncAgentsResource) -> None: - self._agents = agents - - @cached_property - def token(self) -> AsyncTokenResourceWithRawResponse: - return AsyncTokenResourceWithRawResponse(self._agents.token) - - -class AgentsResourceWithStreamingResponse: - def __init__(self, agents: AgentsResource) -> None: - self._agents = agents - - @cached_property - def token(self) -> TokenResourceWithStreamingResponse: - return TokenResourceWithStreamingResponse(self._agents.token) - - -class AsyncAgentsResourceWithStreamingResponse: - def __init__(self, agents: AsyncAgentsResource) -> None: - self._agents = agents - - @cached_property - def token(self) -> AsyncTokenResourceWithStreamingResponse: - return AsyncTokenResourceWithStreamingResponse(self._agents.token) diff --git a/src/gradientai/resources/auth/agents/token.py b/src/gradientai/resources/auth/agents/token.py deleted file mode 100644 index f39c892d..00000000 --- a/src/gradientai/resources/auth/agents/token.py +++ /dev/null @@ -1,173 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.auth.agents import token_create_params -from ....types.auth.agents.token_create_response import TokenCreateResponse - -__all__ = ["TokenResource", "AsyncTokenResource"] - - -class TokenResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> TokenResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return TokenResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> TokenResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return TokenResourceWithStreamingResponse(self) - - def create( - self, - path_agent_uuid: str, - *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> TokenCreateResponse: - """ - To issue an agent token, send a POST request to - `/v2/gen-ai/auth/agents/{agent_uuid}/token`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - return self._post( - f"/v2/gen-ai/auth/agents/{path_agent_uuid}/token", - body=maybe_transform({"body_agent_uuid": body_agent_uuid}, token_create_params.TokenCreateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=TokenCreateResponse, - ) - - -class AsyncTokenResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncTokenResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncTokenResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncTokenResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncTokenResourceWithStreamingResponse(self) - - async def create( - self, - path_agent_uuid: str, - *, - body_agent_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> TokenCreateResponse: - """ - To issue an agent token, send a POST request to - `/v2/gen-ai/auth/agents/{agent_uuid}/token`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not path_agent_uuid: - raise ValueError(f"Expected a non-empty value for `path_agent_uuid` but received {path_agent_uuid!r}") - return await self._post( - f"/v2/gen-ai/auth/agents/{path_agent_uuid}/token", - body=await async_maybe_transform( - {"body_agent_uuid": body_agent_uuid}, token_create_params.TokenCreateParams - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=TokenCreateResponse, - ) - - -class TokenResourceWithRawResponse: - def __init__(self, token: TokenResource) -> None: - self._token = token - - self.create = to_raw_response_wrapper( - token.create, - ) - - -class AsyncTokenResourceWithRawResponse: - def __init__(self, token: AsyncTokenResource) -> None: - self._token = token - - self.create = async_to_raw_response_wrapper( - token.create, - ) - - -class TokenResourceWithStreamingResponse: - def 
__init__(self, token: TokenResource) -> None: - self._token = token - - self.create = to_streamed_response_wrapper( - token.create, - ) - - -class AsyncTokenResourceWithStreamingResponse: - def __init__(self, token: AsyncTokenResource) -> None: - self._token = token - - self.create = async_to_streamed_response_wrapper( - token.create, - ) diff --git a/src/gradientai/resources/auth/auth.py b/src/gradientai/resources/auth/auth.py deleted file mode 100644 index 985fc56c..00000000 --- a/src/gradientai/resources/auth/auth.py +++ /dev/null @@ -1,102 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from .agents.agents import ( - AgentsResource, - AsyncAgentsResource, - AgentsResourceWithRawResponse, - AsyncAgentsResourceWithRawResponse, - AgentsResourceWithStreamingResponse, - AsyncAgentsResourceWithStreamingResponse, -) - -__all__ = ["AuthResource", "AsyncAuthResource"] - - -class AuthResource(SyncAPIResource): - @cached_property - def agents(self) -> AgentsResource: - return AgentsResource(self._client) - - @cached_property - def with_raw_response(self) -> AuthResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AuthResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AuthResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AuthResourceWithStreamingResponse(self) - - -class AsyncAuthResource(AsyncAPIResource): - @cached_property - def agents(self) -> AsyncAgentsResource: - return AsyncAgentsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncAuthResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAuthResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAuthResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAuthResourceWithStreamingResponse(self) - - -class AuthResourceWithRawResponse: - def __init__(self, auth: AuthResource) -> None: - self._auth = auth - - @cached_property - def agents(self) -> AgentsResourceWithRawResponse: - return AgentsResourceWithRawResponse(self._auth.agents) - - -class AsyncAuthResourceWithRawResponse: - def __init__(self, auth: AsyncAuthResource) -> None: - self._auth = auth - - @cached_property - def agents(self) -> AsyncAgentsResourceWithRawResponse: - return AsyncAgentsResourceWithRawResponse(self._auth.agents) - - -class AuthResourceWithStreamingResponse: - def __init__(self, auth: AuthResource) -> None: - self._auth = auth - - @cached_property - def agents(self) -> AgentsResourceWithStreamingResponse: - return AgentsResourceWithStreamingResponse(self._auth.agents) - - -class AsyncAuthResourceWithStreamingResponse: - def __init__(self, auth: AsyncAuthResource) -> None: - self._auth = auth - - @cached_property - def agents(self) -> 
AsyncAgentsResourceWithStreamingResponse: - return AsyncAgentsResourceWithStreamingResponse(self._auth.agents) diff --git a/src/gradientai/resources/embeddings.py b/src/gradientai/resources/embeddings.py deleted file mode 100644 index 1bcd3145..00000000 --- a/src/gradientai/resources/embeddings.py +++ /dev/null @@ -1,201 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union - -import httpx - -from ..types import embedding_create_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.embedding_create_response import EmbeddingCreateResponse - -__all__ = ["EmbeddingsResource", "AsyncEmbeddingsResource"] - - -class EmbeddingsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> EmbeddingsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return EmbeddingsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> EmbeddingsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return EmbeddingsResourceWithStreamingResponse(self) - - def create( - self, - *, - input: Union[str, List[str]], - model: str, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> EmbeddingCreateResponse: - """ - Creates an embedding vector representing the input text. - - Args: - input: Input text to embed, encoded as a string or array of tokens. To embed multiple - inputs in a single request, pass an array of strings. - - model: ID of the model to use. You can use the List models API to see all of your - available models. - - user: A unique identifier representing your end-user, which can help DigitalOcean to - monitor and detect abuse. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/embeddings", - body=maybe_transform( - { - "input": input, - "model": model, - "user": user, - }, - embedding_create_params.EmbeddingCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=EmbeddingCreateResponse, - ) - - -class AsyncEmbeddingsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncEmbeddingsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncEmbeddingsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncEmbeddingsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncEmbeddingsResourceWithStreamingResponse(self) - - async def create( - self, - *, - input: Union[str, List[str]], - model: str, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> EmbeddingCreateResponse: - """ - Creates an embedding vector representing the input text. - - Args: - input: Input text to embed, encoded as a string or array of tokens. To embed multiple - inputs in a single request, pass an array of strings. - - model: ID of the model to use. You can use the List models API to see all of your - available models. - - user: A unique identifier representing your end-user, which can help DigitalOcean to - monitor and detect abuse. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/embeddings", - body=await async_maybe_transform( - { - "input": input, - "model": model, - "user": user, - }, - embedding_create_params.EmbeddingCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=EmbeddingCreateResponse, - ) - - -class EmbeddingsResourceWithRawResponse: - def __init__(self, embeddings: EmbeddingsResource) -> None: - self._embeddings = embeddings - - self.create = to_raw_response_wrapper( - embeddings.create, - ) - - -class AsyncEmbeddingsResourceWithRawResponse: - def __init__(self, embeddings: AsyncEmbeddingsResource) -> None: - self._embeddings = embeddings - - self.create = async_to_raw_response_wrapper( - embeddings.create, - ) - - -class EmbeddingsResourceWithStreamingResponse: - def __init__(self, embeddings: EmbeddingsResource) -> None: - self._embeddings = embeddings - - self.create = to_streamed_response_wrapper( - embeddings.create, - ) - - -class AsyncEmbeddingsResourceWithStreamingResponse: - def __init__(self, 
embeddings: AsyncEmbeddingsResource) -> None: - self._embeddings = embeddings - - self.create = async_to_streamed_response_wrapper( - embeddings.create, - ) diff --git a/src/gradientai/resources/indexing_jobs.py b/src/gradientai/resources/indexing_jobs.py index d0b933e8..fcbcf43d 100644 --- a/src/gradientai/resources/indexing_jobs.py +++ b/src/gradientai/resources/indexing_jobs.py @@ -73,7 +73,9 @@ def create( timeout: Override the client-level default timeout for this request, in seconds """ return self._post( - "/v2/gen-ai/indexing_jobs", + "/v2/gen-ai/indexing_jobs" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/indexing_jobs", body=maybe_transform( { "data_source_uuids": data_source_uuids, @@ -114,7 +116,9 @@ def retrieve( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return self._get( - f"/v2/gen-ai/indexing_jobs/{uuid}", + f"/v2/gen-ai/indexing_jobs/{uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -151,7 +155,9 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ return self._get( - "/v2/gen-ai/indexing_jobs", + "/v2/gen-ai/indexing_jobs" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/indexing_jobs", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -195,7 +201,9 @@ def retrieve_data_sources( if not indexing_job_uuid: raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}") return self._get( - f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources", + f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources" + if self._client._base_url_overridden + else 
f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -232,7 +240,9 @@ def update_cancel( if not path_uuid: raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") return self._put( - f"/v2/gen-ai/indexing_jobs/{path_uuid}/cancel", + f"/v2/gen-ai/indexing_jobs/{path_uuid}/cancel" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{path_uuid}/cancel", body=maybe_transform( {"body_uuid": body_uuid}, indexing_job_update_cancel_params.IndexingJobUpdateCancelParams ), @@ -289,7 +299,9 @@ async def create( timeout: Override the client-level default timeout for this request, in seconds """ return await self._post( - "/v2/gen-ai/indexing_jobs", + "/v2/gen-ai/indexing_jobs" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/indexing_jobs", body=await async_maybe_transform( { "data_source_uuids": data_source_uuids, @@ -330,7 +342,9 @@ async def retrieve( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return await self._get( - f"/v2/gen-ai/indexing_jobs/{uuid}", + f"/v2/gen-ai/indexing_jobs/{uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -367,7 +381,9 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ return await self._get( - "/v2/gen-ai/indexing_jobs", + "/v2/gen-ai/indexing_jobs" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/indexing_jobs", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -411,7 +427,9 @@ async def 
retrieve_data_sources( if not indexing_job_uuid: raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}") return await self._get( - f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources", + f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/data_sources", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -448,7 +466,9 @@ async def update_cancel( if not path_uuid: raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") return await self._put( - f"/v2/gen-ai/indexing_jobs/{path_uuid}/cancel", + f"/v2/gen-ai/indexing_jobs/{path_uuid}/cancel" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{path_uuid}/cancel", body=await async_maybe_transform( {"body_uuid": body_uuid}, indexing_job_update_cancel_params.IndexingJobUpdateCancelParams ), diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/gradientai/resources/knowledge_bases/data_sources.py index 68714895..b549b3dc 100644 --- a/src/gradientai/resources/knowledge_bases/data_sources.py +++ b/src/gradientai/resources/knowledge_bases/data_sources.py @@ -81,7 +81,9 @@ def create( f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}" ) return self._post( - f"/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources", + f"/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources", body=maybe_transform( { "aws_data_source": aws_data_source, @@ -132,7 +134,9 @@ def list( f"Expected a non-empty value for `knowledge_base_uuid` but received 
{knowledge_base_uuid!r}" ) return self._get( - f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources", + f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -181,7 +185,9 @@ def delete( if not data_source_uuid: raise ValueError(f"Expected a non-empty value for `data_source_uuid` but received {data_source_uuid!r}") return self._delete( - f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}", + f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -242,7 +248,9 @@ async def create( f"Expected a non-empty value for `path_knowledge_base_uuid` but received {path_knowledge_base_uuid!r}" ) return await self._post( - f"/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources", + f"/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_knowledge_base_uuid}/data_sources", body=await async_maybe_transform( { "aws_data_source": aws_data_source, @@ -293,7 +301,9 @@ async def list( f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}" ) return await self._get( - f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources", + f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources" + if self._client._base_url_overridden + else 
f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -342,7 +352,9 @@ async def delete( if not data_source_uuid: raise ValueError(f"Expected a non-empty value for `data_source_uuid` but received {data_source_uuid!r}") return await self._delete( - f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}", + f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources/{data_source_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py index f73ab08c..cf0cd8d8 100644 --- a/src/gradientai/resources/knowledge_bases/knowledge_bases.py +++ b/src/gradientai/resources/knowledge_bases/knowledge_bases.py @@ -109,7 +109,9 @@ def create( timeout: Override the client-level default timeout for this request, in seconds """ return self._post( - "/v2/gen-ai/knowledge_bases", + "/v2/gen-ai/knowledge_bases" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases", body=maybe_transform( { "database_id": database_id, @@ -156,7 +158,9 @@ def retrieve( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return self._get( - f"/v2/gen-ai/knowledge_bases/{uuid}", + f"/v2/gen-ai/knowledge_bases/{uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -202,7 +206,9 @@ def update( if not path_uuid: 
raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") return self._put( - f"/v2/gen-ai/knowledge_bases/{path_uuid}", + f"/v2/gen-ai/knowledge_bases/{path_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_uuid}", body=maybe_transform( { "database_id": database_id, @@ -249,7 +255,9 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ return self._get( - "/v2/gen-ai/knowledge_bases", + "/v2/gen-ai/knowledge_bases" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -293,7 +301,9 @@ def delete( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return self._delete( - f"/v2/gen-ai/knowledge_bases/{uuid}", + f"/v2/gen-ai/knowledge_bases/{uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -375,7 +385,9 @@ async def create( timeout: Override the client-level default timeout for this request, in seconds """ return await self._post( - "/v2/gen-ai/knowledge_bases", + "/v2/gen-ai/knowledge_bases" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases", body=await async_maybe_transform( { "database_id": database_id, @@ -422,7 +434,9 @@ async def retrieve( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return await self._get( - f"/v2/gen-ai/knowledge_bases/{uuid}", + f"/v2/gen-ai/knowledge_bases/{uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}", options=make_request_options( extra_headers=extra_headers, 
extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -468,7 +482,9 @@ async def update( if not path_uuid: raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}") return await self._put( - f"/v2/gen-ai/knowledge_bases/{path_uuid}", + f"/v2/gen-ai/knowledge_bases/{path_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_uuid}", body=await async_maybe_transform( { "database_id": database_id, @@ -515,7 +531,9 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ return await self._get( - "/v2/gen-ai/knowledge_bases", + "/v2/gen-ai/knowledge_bases" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -559,7 +577,9 @@ async def delete( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return await self._delete( - f"/v2/gen-ai/knowledge_bases/{uuid}", + f"/v2/gen-ai/knowledge_bases/{uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), diff --git a/src/gradientai/resources/models.py b/src/gradientai/resources/models.py index b0df90ad..d8b6b385 100644 --- a/src/gradientai/resources/models.py +++ b/src/gradientai/resources/models.py @@ -72,7 +72,9 @@ def retrieve( if not model: raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return self._get( - f"/models/{model}", + f"/models/{model}" + if self._client._base_url_overridden + else f"https://inference.do-ai.run/v1/models/{model}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -134,7 
+136,9 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ return self._get( - "/v2/gen-ai/models", + "/v2/gen-ai/models" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/models", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -201,7 +205,9 @@ async def retrieve( if not model: raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return await self._get( - f"/models/{model}", + f"/models/{model}" + if self._client._base_url_overridden + else f"https://inference.do-ai.run/v1/models/{model}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -263,7 +269,9 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ return await self._get( - "/v2/gen-ai/models", + "/v2/gen-ai/models" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/models", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, diff --git a/src/gradientai/resources/providers/anthropic/keys.py b/src/gradientai/resources/providers/anthropic/keys.py index 1b11fc99..9c1f6391 100644 --- a/src/gradientai/resources/providers/anthropic/keys.py +++ b/src/gradientai/resources/providers/anthropic/keys.py @@ -72,7 +72,9 @@ def create( timeout: Override the client-level default timeout for this request, in seconds """ return self._post( - "/v2/gen-ai/anthropic/keys", + "/v2/gen-ai/anthropic/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", body=maybe_transform( { "api_key": api_key, @@ -113,7 +115,9 @@ def retrieve( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return self._get( - f"/v2/gen-ai/anthropic/keys/{api_key_uuid}", + 
f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -150,7 +154,9 @@ def update( if not path_api_key_uuid: raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") return self._put( - f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}", + f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}", body=maybe_transform( { "api_key": api_key, @@ -195,7 +201,9 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ return self._get( - "/v2/gen-ai/anthropic/keys", + "/v2/gen-ai/anthropic/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -239,7 +247,9 @@ def delete( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return self._delete( - f"/v2/gen-ai/anthropic/keys/{api_key_uuid}", + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -278,7 +288,9 @@ def list_agents( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return self._get( - f"/v2/gen-ai/anthropic/keys/{uuid}/agents", + f"/v2/gen-ai/anthropic/keys/{uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents", 
options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -342,7 +354,9 @@ async def create( timeout: Override the client-level default timeout for this request, in seconds """ return await self._post( - "/v2/gen-ai/anthropic/keys", + "/v2/gen-ai/anthropic/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", body=await async_maybe_transform( { "api_key": api_key, @@ -383,7 +397,9 @@ async def retrieve( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return await self._get( - f"/v2/gen-ai/anthropic/keys/{api_key_uuid}", + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -420,7 +436,9 @@ async def update( if not path_api_key_uuid: raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") return await self._put( - f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}", + f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}", body=await async_maybe_transform( { "api_key": api_key, @@ -465,7 +483,9 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ return await self._get( - "/v2/gen-ai/anthropic/keys", + "/v2/gen-ai/anthropic/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -509,7 +529,9 @@ async def delete( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") 
return await self._delete( - f"/v2/gen-ai/anthropic/keys/{api_key_uuid}", + f"/v2/gen-ai/anthropic/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -548,7 +570,9 @@ async def list_agents( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return await self._get( - f"/v2/gen-ai/anthropic/keys/{uuid}/agents", + f"/v2/gen-ai/anthropic/keys/{uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, diff --git a/src/gradientai/resources/providers/openai/keys.py b/src/gradientai/resources/providers/openai/keys.py index abcb22f0..9bfaba8e 100644 --- a/src/gradientai/resources/providers/openai/keys.py +++ b/src/gradientai/resources/providers/openai/keys.py @@ -71,7 +71,9 @@ def create( timeout: Override the client-level default timeout for this request, in seconds """ return self._post( - "/v2/gen-ai/openai/keys", + "/v2/gen-ai/openai/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/openai/keys", body=maybe_transform( { "api_key": api_key, @@ -112,7 +114,9 @@ def retrieve( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return self._get( - f"/v2/gen-ai/openai/keys/{api_key_uuid}", + f"/v2/gen-ai/openai/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -149,7 +153,9 @@ def update( if not path_api_key_uuid: raise ValueError(f"Expected a 
non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") return self._put( - f"/v2/gen-ai/openai/keys/{path_api_key_uuid}", + f"/v2/gen-ai/openai/keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}", body=maybe_transform( { "api_key": api_key, @@ -193,7 +199,9 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ return self._get( - "/v2/gen-ai/openai/keys", + "/v2/gen-ai/openai/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/openai/keys", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -237,7 +245,9 @@ def delete( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return self._delete( - f"/v2/gen-ai/openai/keys/{api_key_uuid}", + f"/v2/gen-ai/openai/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -276,7 +286,9 @@ def retrieve_agents( if not uuid: raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return self._get( - f"/v2/gen-ai/openai/keys/{uuid}/agents", + f"/v2/gen-ai/openai/keys/{uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -339,7 +351,9 @@ async def create( timeout: Override the client-level default timeout for this request, in seconds """ return await self._post( - "/v2/gen-ai/openai/keys", + "/v2/gen-ai/openai/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/openai/keys", body=await 
async_maybe_transform( { "api_key": api_key, @@ -380,7 +394,9 @@ async def retrieve( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return await self._get( - f"/v2/gen-ai/openai/keys/{api_key_uuid}", + f"/v2/gen-ai/openai/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -417,7 +433,9 @@ async def update( if not path_api_key_uuid: raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}") return await self._put( - f"/v2/gen-ai/openai/keys/{path_api_key_uuid}", + f"/v2/gen-ai/openai/keys/{path_api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}", body=await async_maybe_transform( { "api_key": api_key, @@ -461,7 +479,9 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ return await self._get( - "/v2/gen-ai/openai/keys", + "/v2/gen-ai/openai/keys" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/openai/keys", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -505,7 +525,9 @@ async def delete( if not api_key_uuid: raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}") return await self._delete( - f"/v2/gen-ai/openai/keys/{api_key_uuid}", + f"/v2/gen-ai/openai/keys/{api_key_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), @@ -544,7 +566,9 @@ async def retrieve_agents( if not uuid: raise 
ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}") return await self._get( - f"/v2/gen-ai/openai/keys/{uuid}/agents", + f"/v2/gen-ai/openai/keys/{uuid}/agents" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, diff --git a/src/gradientai/resources/regions.py b/src/gradientai/resources/regions.py index bbf07c3e..43c2038b 100644 --- a/src/gradientai/resources/regions.py +++ b/src/gradientai/resources/regions.py @@ -70,7 +70,9 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ return self._get( - "/v2/gen-ai/regions", + "/v2/gen-ai/regions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/regions", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -137,7 +139,9 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ return await self._get( - "/v2/gen-ai/regions", + "/v2/gen-ai/regions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/regions", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index a389ecab..4b12d65c 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -26,10 +26,8 @@ from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo -from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .api_deployment_visibility import APIDeploymentVisibility 
as APIDeploymentVisibility -from .embedding_create_response import EmbeddingCreateResponse as EmbeddingCreateResponse from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams diff --git a/src/gradientai/types/api_agent.py b/src/gradientai/types/api_agent.py index d6e18ca2..0a8df679 100644 --- a/src/gradientai/types/api_agent.py +++ b/src/gradientai/types/api_agent.py @@ -261,3 +261,5 @@ class APIAgent(BaseModel): user_id: Optional[str] = None uuid: Optional[str] = None + + workspace: Optional[object] = None diff --git a/src/gradientai/types/auth/agents/__init__.py b/src/gradientai/types/auth/agents/__init__.py index 9fae55b6..f8ee8b14 100644 --- a/src/gradientai/types/auth/agents/__init__.py +++ b/src/gradientai/types/auth/agents/__init__.py @@ -1,6 +1,3 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from __future__ import annotations - -from .token_create_params import TokenCreateParams as TokenCreateParams -from .token_create_response import TokenCreateResponse as TokenCreateResponse diff --git a/src/gradientai/types/auth/agents/token_create_params.py b/src/gradientai/types/auth/agents/token_create_params.py deleted file mode 100644 index 0df640f9..00000000 --- a/src/gradientai/types/auth/agents/token_create_params.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Annotated, TypedDict - -from ...._utils import PropertyInfo - -__all__ = ["TokenCreateParams"] - - -class TokenCreateParams(TypedDict, total=False): - body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] diff --git a/src/gradientai/types/auth/agents/token_create_response.py b/src/gradientai/types/auth/agents/token_create_response.py deleted file mode 100644 index e58b7399..00000000 --- a/src/gradientai/types/auth/agents/token_create_response.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ...._models import BaseModel - -__all__ = ["TokenCreateResponse"] - - -class TokenCreateResponse(BaseModel): - access_token: Optional[str] = None - - refresh_token: Optional[str] = None diff --git a/src/gradientai/types/embedding_create_params.py b/src/gradientai/types/embedding_create_params.py deleted file mode 100644 index d3e923ad..00000000 --- a/src/gradientai/types/embedding_create_params.py +++ /dev/null @@ -1,28 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union -from typing_extensions import Required, TypedDict - -__all__ = ["EmbeddingCreateParams"] - - -class EmbeddingCreateParams(TypedDict, total=False): - input: Required[Union[str, List[str]]] - """Input text to embed, encoded as a string or array of tokens. - - To embed multiple inputs in a single request, pass an array of strings. - """ - - model: Required[str] - """ID of the model to use. - - You can use the List models API to see all of your available models. - """ - - user: str - """ - A unique identifier representing your end-user, which can help DigitalOcean to - monitor and detect abuse. 
- """ diff --git a/src/gradientai/types/embedding_create_response.py b/src/gradientai/types/embedding_create_response.py deleted file mode 100644 index 19c474fd..00000000 --- a/src/gradientai/types/embedding_create_response.py +++ /dev/null @@ -1,41 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["EmbeddingCreateResponse", "Data", "Usage"] - - -class Data(BaseModel): - embedding: List[float] - """The embedding vector, which is a list of floats.""" - - index: int - """The index of the embedding in the list of embeddings.""" - - object: Literal["embedding"] - """The object type, which is always "embedding".""" - - -class Usage(BaseModel): - prompt_tokens: int - """The number of tokens used by the prompt.""" - - total_tokens: int - """The total number of tokens used by the request.""" - - -class EmbeddingCreateResponse(BaseModel): - data: List[Data] - """The list of embeddings generated by the model.""" - - model: str - """The name of the model used to generate the embedding.""" - - object: Literal["list"] - """The object type, which is always "list".""" - - usage: Usage - """The usage information for the request.""" diff --git a/src/gradientai/types/knowledge_base_create_params.py b/src/gradientai/types/knowledge_base_create_params.py index 3a58166b..2552bcf6 100644 --- a/src/gradientai/types/knowledge_base_create_params.py +++ b/src/gradientai/types/knowledge_base_create_params.py @@ -9,7 +9,7 @@ from .knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam from .knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam -__all__ = ["KnowledgeBaseCreateParams", "Datasource"] +__all__ = ["KnowledgeBaseCreateParams", "Datasource", "DatasourceAwsDataSource"] class KnowledgeBaseCreateParams(TypedDict, total=False): @@ -49,7 +49,21 @@ class 
KnowledgeBaseCreateParams(TypedDict, total=False): vpc_uuid: str +class DatasourceAwsDataSource(TypedDict, total=False): + bucket_name: str + + item_path: str + + key_id: str + + region: str + + secret_key: str + + class Datasource(TypedDict, total=False): + aws_data_source: DatasourceAwsDataSource + bucket_name: str bucket_region: str diff --git a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py index df1cd3bb..57080aaa 100644 --- a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py +++ b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py @@ -9,10 +9,20 @@ from .api_file_upload_data_source import APIFileUploadDataSource from .api_web_crawler_data_source import APIWebCrawlerDataSource -__all__ = ["APIKnowledgeBaseDataSource"] +__all__ = ["APIKnowledgeBaseDataSource", "AwsDataSource"] + + +class AwsDataSource(BaseModel): + bucket_name: Optional[str] = None + + item_path: Optional[str] = None + + region: Optional[str] = None class APIKnowledgeBaseDataSource(BaseModel): + aws_data_source: Optional[AwsDataSource] = None + bucket_name: Optional[str] = None created_at: Optional[datetime] = None diff --git a/tests/api_resources/auth/__init__.py b/tests/api_resources/auth/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/auth/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/auth/agents/__init__.py b/tests/api_resources/auth/agents/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/auth/agents/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
diff --git a/tests/api_resources/auth/agents/test_token.py b/tests/api_resources/auth/agents/test_token.py deleted file mode 100644 index ef721cd0..00000000 --- a/tests/api_resources/auth/agents/test_token.py +++ /dev/null @@ -1,124 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types.auth.agents import TokenCreateResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestToken: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: GradientAI) -> None: - token = client.auth.agents.token.create( - path_agent_uuid="agent_uuid", - ) - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: - token = client.auth.agents.token.create( - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - ) - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: - response = client.auth.agents.token.with_raw_response.create( - path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - token = response.parse() - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: - with client.auth.agents.token.with_streaming_response.create( - path_agent_uuid="agent_uuid", - ) as response: - assert not 
response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - token = response.parse() - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_create(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.auth.agents.token.with_raw_response.create( - path_agent_uuid="", - ) - - -class TestAsyncToken: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: - token = await async_client.auth.agents.token.create( - path_agent_uuid="agent_uuid", - ) - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - token = await async_client.auth.agents.token.create( - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - ) - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.auth.agents.token.with_raw_response.create( - path_agent_uuid="agent_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - token = await response.parse() - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.auth.agents.token.with_streaming_response.create( - path_agent_uuid="agent_uuid", - ) as response: - assert not 
response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - token = await response.parse() - assert_matches_type(TokenCreateResponse, token, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.auth.agents.token.with_raw_response.create( - path_agent_uuid="", - ) diff --git a/tests/api_resources/test_embeddings.py b/tests/api_resources/test_embeddings.py deleted file mode 100644 index e5b394ef..00000000 --- a/tests/api_resources/test_embeddings.py +++ /dev/null @@ -1,116 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types import EmbeddingCreateResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestEmbeddings: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: GradientAI) -> None: - embedding = client.embeddings.create( - input="The quick brown fox jumped over the lazy dog", - model="text-embedding-3-small", - ) - assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: - embedding = client.embeddings.create( - input="The quick brown fox jumped over the lazy dog", - model="text-embedding-3-small", - user="user-1234", - ) - assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) - - 
@pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: - response = client.embeddings.with_raw_response.create( - input="The quick brown fox jumped over the lazy dog", - model="text-embedding-3-small", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - embedding = response.parse() - assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: - with client.embeddings.with_streaming_response.create( - input="The quick brown fox jumped over the lazy dog", - model="text-embedding-3-small", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - embedding = response.parse() - assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncEmbeddings: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: - embedding = await async_client.embeddings.create( - input="The quick brown fox jumped over the lazy dog", - model="text-embedding-3-small", - ) - assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - embedding = await async_client.embeddings.create( - input="The quick brown fox jumped over the lazy dog", - model="text-embedding-3-small", - user="user-1234", - ) - assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - 
response = await async_client.embeddings.with_raw_response.create( - input="The quick brown fox jumped over the lazy dog", - model="text-embedding-3-small", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - embedding = await response.parse() - assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.embeddings.with_streaming_response.create( - input="The quick brown fox jumped over the lazy dog", - model="text-embedding-3-small", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - embedding = await response.parse() - assert_matches_type(EmbeddingCreateResponse, embedding, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index e204f9fe..c9171644 100644 --- a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -36,6 +36,13 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: database_id="database_id", datasources=[ { + "aws_data_source": { + "bucket_name": "bucket_name", + "item_path": "item_path", + "key_id": "key_id", + "region": "region", + "secret_key": "secret_key", + }, "bucket_name": "bucket_name", "bucket_region": "bucket_region", "file_upload_data_source": { @@ -281,6 +288,13 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI database_id="database_id", datasources=[ { + "aws_data_source": { + "bucket_name": "bucket_name", + "item_path": "item_path", + "key_id": "key_id", + "region": "region", + "secret_key": "secret_key", + }, "bucket_name": "bucket_name", "bucket_region": "bucket_region", "file_upload_data_source": { From 
e5c8d768388b16c06fcc2abee71a53dcc8b3e8c5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 11:35:24 +0000 Subject: [PATCH 20/41] feat(api): update via SDK Studio --- .stats.yml | 4 +- api.md | 8 +- src/gradientai/_client.py | 39 +- src/gradientai/resources/__init__.py | 14 + src/gradientai/resources/chat/__init__.py | 33 ++ src/gradientai/resources/chat/chat.py | 102 +++++ src/gradientai/resources/chat/completions.py | 385 ++++++++++++++++++ src/gradientai/types/auth/agents/__init__.py | 3 - src/gradientai/types/chat/__init__.py | 6 + .../types/chat/completion_create_params.py | 185 +++++++++ .../types/chat/completion_create_response.py | 190 +++++++++ .../api_resources/chat}/__init__.py | 2 - tests/api_resources/chat/test_completions.py | 184 +++++++++ 13 files changed, 1146 insertions(+), 9 deletions(-) create mode 100644 src/gradientai/resources/chat/__init__.py create mode 100644 src/gradientai/resources/chat/chat.py create mode 100644 src/gradientai/resources/chat/completions.py delete mode 100644 src/gradientai/types/auth/agents/__init__.py create mode 100644 src/gradientai/types/chat/__init__.py create mode 100644 src/gradientai/types/chat/completion_create_params.py create mode 100644 src/gradientai/types/chat/completion_create_response.py rename {src/gradientai/types/auth => tests/api_resources/chat}/__init__.py (70%) create mode 100644 tests/api_resources/chat/test_completions.py diff --git a/.stats.yml b/.stats.yml index 9eb9eab4..bd458f47 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 56 +configured_endpoints: 57 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: cfd2d18e8dfe7223b15ce9b204cef29e +config_hash: c880014064b4d19b42254d47f1bb2758 diff 
--git a/api.md b/api.md index 278862d0..ed9acf78 100644 --- a/api.md +++ b/api.md @@ -287,12 +287,18 @@ Methods: # Chat +## Completions + Types: ```python -from gradientai.types import ChatCompletionTokenLogprob +from gradientai.types.chat import CompletionCreateResponse ``` +Methods: + +- client.chat.completions.create(\*\*params) -> CompletionCreateResponse + # Models Types: diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index e050112e..6927ff10 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -31,9 +31,10 @@ ) if TYPE_CHECKING: - from .resources import agents, models, regions, api_keys, providers, indexing_jobs, knowledge_bases + from .resources import chat, agents, models, regions, api_keys, providers, indexing_jobs, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource from .resources.regions import RegionsResource, AsyncRegionsResource + from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource from .resources.api_keys.api_keys import APIKeysResource, AsyncAPIKeysResource @@ -144,6 +145,12 @@ def api_keys(self) -> APIKeysResource: return APIKeysResource(self) + @cached_property + def chat(self) -> ChatResource: + from .resources.chat import ChatResource + + return ChatResource(self) + @cached_property def models(self) -> ModelsResource: from .resources.models import ModelsResource @@ -357,6 +364,12 @@ def api_keys(self) -> AsyncAPIKeysResource: return AsyncAPIKeysResource(self) + @cached_property + def chat(self) -> AsyncChatResource: + from .resources.chat import AsyncChatResource + + return AsyncChatResource(self) + @cached_property def models(self) -> AsyncModelsResource: from .resources.models import AsyncModelsResource @@ -520,6 +533,12 @@ def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: return 
APIKeysResourceWithRawResponse(self._client.api_keys) + @cached_property + def chat(self) -> chat.ChatResourceWithRawResponse: + from .resources.chat import ChatResourceWithRawResponse + + return ChatResourceWithRawResponse(self._client.chat) + @cached_property def models(self) -> models.ModelsResourceWithRawResponse: from .resources.models import ModelsResourceWithRawResponse @@ -569,6 +588,12 @@ def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse: return AsyncAPIKeysResourceWithRawResponse(self._client.api_keys) + @cached_property + def chat(self) -> chat.AsyncChatResourceWithRawResponse: + from .resources.chat import AsyncChatResourceWithRawResponse + + return AsyncChatResourceWithRawResponse(self._client.chat) + @cached_property def models(self) -> models.AsyncModelsResourceWithRawResponse: from .resources.models import AsyncModelsResourceWithRawResponse @@ -618,6 +643,12 @@ def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: return APIKeysResourceWithStreamingResponse(self._client.api_keys) + @cached_property + def chat(self) -> chat.ChatResourceWithStreamingResponse: + from .resources.chat import ChatResourceWithStreamingResponse + + return ChatResourceWithStreamingResponse(self._client.chat) + @cached_property def models(self) -> models.ModelsResourceWithStreamingResponse: from .resources.models import ModelsResourceWithStreamingResponse @@ -667,6 +698,12 @@ def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: return AsyncAPIKeysResourceWithStreamingResponse(self._client.api_keys) + @cached_property + def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: + from .resources.chat import AsyncChatResourceWithStreamingResponse + + return AsyncChatResourceWithStreamingResponse(self._client.chat) + @cached_property def models(self) -> models.AsyncModelsResourceWithStreamingResponse: from .resources.models import AsyncModelsResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py 
b/src/gradientai/resources/__init__.py index 82f79bc7..de26662c 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -1,5 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from .chat import ( + ChatResource, + AsyncChatResource, + ChatResourceWithRawResponse, + AsyncChatResourceWithRawResponse, + ChatResourceWithStreamingResponse, + AsyncChatResourceWithStreamingResponse, +) from .agents import ( AgentsResource, AsyncAgentsResource, @@ -94,6 +102,12 @@ "AsyncAPIKeysResourceWithRawResponse", "APIKeysResourceWithStreamingResponse", "AsyncAPIKeysResourceWithStreamingResponse", + "ChatResource", + "AsyncChatResource", + "ChatResourceWithRawResponse", + "AsyncChatResourceWithRawResponse", + "ChatResourceWithStreamingResponse", + "AsyncChatResourceWithStreamingResponse", "ModelsResource", "AsyncModelsResource", "ModelsResourceWithRawResponse", diff --git a/src/gradientai/resources/chat/__init__.py b/src/gradientai/resources/chat/__init__.py new file mode 100644 index 00000000..ec960eb4 --- /dev/null +++ b/src/gradientai/resources/chat/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .chat import ( + ChatResource, + AsyncChatResource, + ChatResourceWithRawResponse, + AsyncChatResourceWithRawResponse, + ChatResourceWithStreamingResponse, + AsyncChatResourceWithStreamingResponse, +) +from .completions import ( + CompletionsResource, + AsyncCompletionsResource, + CompletionsResourceWithRawResponse, + AsyncCompletionsResourceWithRawResponse, + CompletionsResourceWithStreamingResponse, + AsyncCompletionsResourceWithStreamingResponse, +) + +__all__ = [ + "CompletionsResource", + "AsyncCompletionsResource", + "CompletionsResourceWithRawResponse", + "AsyncCompletionsResourceWithRawResponse", + "CompletionsResourceWithStreamingResponse", + "AsyncCompletionsResourceWithStreamingResponse", + "ChatResource", + "AsyncChatResource", + "ChatResourceWithRawResponse", + "AsyncChatResourceWithRawResponse", + "ChatResourceWithStreamingResponse", + "AsyncChatResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/chat/chat.py b/src/gradientai/resources/chat/chat.py new file mode 100644 index 00000000..ac19d849 --- /dev/null +++ b/src/gradientai/resources/chat/chat.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from .completions import ( + CompletionsResource, + AsyncCompletionsResource, + CompletionsResourceWithRawResponse, + AsyncCompletionsResourceWithRawResponse, + CompletionsResourceWithStreamingResponse, + AsyncCompletionsResourceWithStreamingResponse, +) + +__all__ = ["ChatResource", "AsyncChatResource"] + + +class ChatResource(SyncAPIResource): + @cached_property + def completions(self) -> CompletionsResource: + return CompletionsResource(self._client) + + @cached_property + def with_raw_response(self) -> ChatResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + """ + return ChatResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ChatResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + """ + return ChatResourceWithStreamingResponse(self) + + +class AsyncChatResource(AsyncAPIResource): + @cached_property + def completions(self) -> AsyncCompletionsResource: + return AsyncCompletionsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncChatResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + """ + return AsyncChatResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + """ + return AsyncChatResourceWithStreamingResponse(self) + + +class ChatResourceWithRawResponse: + def __init__(self, chat: ChatResource) -> None: + self._chat = chat + + @cached_property + def completions(self) -> CompletionsResourceWithRawResponse: + return CompletionsResourceWithRawResponse(self._chat.completions) + + +class AsyncChatResourceWithRawResponse: + def __init__(self, chat: AsyncChatResource) -> None: + self._chat = chat + + @cached_property + def completions(self) -> AsyncCompletionsResourceWithRawResponse: + return AsyncCompletionsResourceWithRawResponse(self._chat.completions) + + +class ChatResourceWithStreamingResponse: + def __init__(self, chat: ChatResource) -> None: + self._chat = chat + + @cached_property + def completions(self) -> CompletionsResourceWithStreamingResponse: + return CompletionsResourceWithStreamingResponse(self._chat.completions) + + +class AsyncChatResourceWithStreamingResponse: + def __init__(self, chat: AsyncChatResource) -> None: + self._chat = chat + + @cached_property + def completions(self) -> AsyncCompletionsResourceWithStreamingResponse: + return AsyncCompletionsResourceWithStreamingResponse(self._chat.completions) diff --git a/src/gradientai/resources/chat/completions.py b/src/gradientai/resources/chat/completions.py new file mode 100644 index 00000000..62ab8f0d --- /dev/null +++ b/src/gradientai/resources/chat/completions.py @@ -0,0 +1,385 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...types.chat import completion_create_params +from ..._base_client import make_request_options +from ...types.chat.completion_create_response import CompletionCreateResponse + +__all__ = ["CompletionsResource", "AsyncCompletionsResource"] + + +class CompletionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> CompletionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + """ + return CompletionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + """ + return CompletionsResourceWithStreamingResponse(self) + + def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionCreateResponse: + """ + Creates a model response for the given chat conversation. + + Args: + messages: A list of messages comprising the conversation so far. + + model: Model ID used to generate the response. + + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. 
+ + logit_bias: Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + + logprobs: Whether to return log probabilities of the output tokens or not. If true, + returns the log probabilities of each output token returned in the `content` of + `message`. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. + + max_tokens: The maximum number of tokens that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + n: How many chat completion choices to generate for each input message. Note that + you will be charged based on the number of generated tokens across all of the + choices. Keep `n` as `1` to minimize costs. + + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on + whether they appear in the text so far, increasing the model's likelihood to + talk about new topics. + + stop: Up to 4 sequences where the API will stop generating further tokens. 
The + returned text will not contain the stop sequence. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + + stream_options: Options for streaming response. Only set this when you set `stream: true`. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/chat/completions" + if self._client._base_url_overridden + else "https://inference.do-ai.run/v1/chat/completions", + body=maybe_transform( + { + "messages": messages, + "model": model, + "frequency_penalty": frequency_penalty, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_completion_tokens": max_completion_tokens, + "max_tokens": max_tokens, + "metadata": metadata, + "n": n, + "presence_penalty": presence_penalty, + "stop": stop, + "stream": stream, + "stream_options": stream_options, + "temperature": temperature, + "top_logprobs": top_logprobs, + "top_p": top_p, + "user": user, + }, + completion_create_params.CompletionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CompletionCreateResponse, + ) + + +class AsyncCompletionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + """ + return AsyncCompletionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + """ + return AsyncCompletionsResourceWithStreamingResponse(self) + + async def create( + self, + *, + messages: Iterable[completion_create_params.Message], + model: str, + frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, + logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, + logprobs: Optional[bool] | NotGiven = NOT_GIVEN, + max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tokens: Optional[int] | NotGiven = NOT_GIVEN, + metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, + stream: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, + temperature: Optional[float] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, + top_p: Optional[float] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> CompletionCreateResponse: + """ + Creates a model response for the given chat conversation. + + Args: + messages: A list of messages comprising the conversation so far. + + model: Model ID used to generate the response. + + frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their + existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. 
+ + logit_bias: Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + + logprobs: Whether to return log probabilities of the output tokens or not. If true, + returns the log probabilities of each output token returned in the `content` of + `message`. + + max_completion_tokens: The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. + + max_tokens: The maximum number of tokens that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. + + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + + n: How many chat completion choices to generate for each input message. Note that + you will be charged based on the number of generated tokens across all of the + choices. Keep `n` as `1` to minimize costs. + + presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on + whether they appear in the text so far, increasing the model's likelihood to + talk about new topics. + + stop: Up to 4 sequences where the API will stop generating further tokens. 
The + returned text will not contain the stop sequence. + + stream: If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + + stream_options: Options for streaming response. Only set this when you set `stream: true`. + + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + make the output more random, while lower values like 0.2 will make it more + focused and deterministic. We generally recommend altering this or `top_p` but + not both. + + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + + top_p: An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + + user: A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/chat/completions" + if self._client._base_url_overridden + else "https://inference.do-ai.run/v1/chat/completions", + body=await async_maybe_transform( + { + "messages": messages, + "model": model, + "frequency_penalty": frequency_penalty, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_completion_tokens": max_completion_tokens, + "max_tokens": max_tokens, + "metadata": metadata, + "n": n, + "presence_penalty": presence_penalty, + "stop": stop, + "stream": stream, + "stream_options": stream_options, + "temperature": temperature, + "top_logprobs": top_logprobs, + "top_p": top_p, + "user": user, + }, + completion_create_params.CompletionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=CompletionCreateResponse, + ) + + +class CompletionsResourceWithRawResponse: + def __init__(self, completions: CompletionsResource) -> None: + self._completions = completions + + self.create = to_raw_response_wrapper( + completions.create, + ) + + +class AsyncCompletionsResourceWithRawResponse: + def __init__(self, completions: AsyncCompletionsResource) -> None: + self._completions = completions + + self.create = async_to_raw_response_wrapper( + completions.create, + ) + + +class CompletionsResourceWithStreamingResponse: + def __init__(self, completions: CompletionsResource) -> None: + self._completions = completions + + self.create = to_streamed_response_wrapper( + completions.create, + ) + + +class AsyncCompletionsResourceWithStreamingResponse: + def __init__(self, completions: AsyncCompletionsResource) -> None: + self._completions = completions + + self.create = 
async_to_streamed_response_wrapper( + completions.create, + ) diff --git a/src/gradientai/types/auth/agents/__init__.py b/src/gradientai/types/auth/agents/__init__.py deleted file mode 100644 index f8ee8b14..00000000 --- a/src/gradientai/types/auth/agents/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations diff --git a/src/gradientai/types/chat/__init__.py b/src/gradientai/types/chat/__init__.py new file mode 100644 index 00000000..9384ac14 --- /dev/null +++ b/src/gradientai/types/chat/__init__.py @@ -0,0 +1,6 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .completion_create_params import CompletionCreateParams as CompletionCreateParams +from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse diff --git a/src/gradientai/types/chat/completion_create_params.py b/src/gradientai/types/chat/completion_create_params.py new file mode 100644 index 00000000..11d032ff --- /dev/null +++ b/src/gradientai/types/chat/completion_create_params.py @@ -0,0 +1,185 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = [ + "CompletionCreateParams", + "Message", + "MessageChatCompletionRequestSystemMessage", + "MessageChatCompletionRequestDeveloperMessage", + "MessageChatCompletionRequestUserMessage", + "MessageChatCompletionRequestAssistantMessage", + "StreamOptions", +] + + +class CompletionCreateParams(TypedDict, total=False): + messages: Required[Iterable[Message]] + """A list of messages comprising the conversation so far.""" + + model: Required[str] + """Model ID used to generate the response.""" + + frequency_penalty: Optional[float] + """Number between -2.0 and 2.0. + + Positive values penalize new tokens based on their existing frequency in the + text so far, decreasing the model's likelihood to repeat the same line verbatim. + """ + + logit_bias: Optional[Dict[str, int]] + """Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the + tokenizer) to an associated bias value from -100 to 100. Mathematically, the + bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, but values between -1 and 1 should decrease or + increase likelihood of selection; values like -100 or 100 should result in a ban + or exclusive selection of the relevant token. + """ + + logprobs: Optional[bool] + """Whether to return log probabilities of the output tokens or not. + + If true, returns the log probabilities of each output token returned in the + `content` of `message`. + """ + + max_completion_tokens: Optional[int] + """ + The maximum number of completion tokens that may be used over the course of the + run. The run will make a best effort to use only the number of completion tokens + specified, across multiple turns of the run. 
+ """ + + max_tokens: Optional[int] + """The maximum number of tokens that can be generated in the completion. + + The token count of your prompt plus `max_tokens` cannot exceed the model's + context length. + """ + + metadata: Optional[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + n: Optional[int] + """How many chat completion choices to generate for each input message. + + Note that you will be charged based on the number of generated tokens across all + of the choices. Keep `n` as `1` to minimize costs. + """ + + presence_penalty: Optional[float] + """Number between -2.0 and 2.0. + + Positive values penalize new tokens based on whether they appear in the text so + far, increasing the model's likelihood to talk about new topics. + """ + + stop: Union[Optional[str], List[str], None] + """Up to 4 sequences where the API will stop generating further tokens. + + The returned text will not contain the stop sequence. + """ + + stream: Optional[bool] + """ + If set to true, the model response data will be streamed to the client as it is + generated using server-sent events. + """ + + stream_options: Optional[StreamOptions] + """Options for streaming response. Only set this when you set `stream: true`.""" + + temperature: Optional[float] + """What sampling temperature to use, between 0 and 2. + + Higher values like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. We generally recommend altering + this or `top_p` but not both. 
+ """ + + top_logprobs: Optional[int] + """ + An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + """ + + top_p: Optional[float] + """ + An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + """ + + user: str + """ + A unique identifier representing your end-user, which can help DigitalOcean to + monitor and detect abuse. + """ + + +class MessageChatCompletionRequestSystemMessage(TypedDict, total=False): + content: Required[Union[str, List[str]]] + """The contents of the system message.""" + + role: Required[Literal["system"]] + """The role of the messages author, in this case `system`.""" + + +class MessageChatCompletionRequestDeveloperMessage(TypedDict, total=False): + content: Required[Union[str, List[str]]] + """The contents of the developer message.""" + + role: Required[Literal["developer"]] + """The role of the messages author, in this case `developer`.""" + + +class MessageChatCompletionRequestUserMessage(TypedDict, total=False): + content: Required[Union[str, List[str]]] + """The contents of the user message.""" + + role: Required[Literal["user"]] + """The role of the messages author, in this case `user`.""" + + +class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): + role: Required[Literal["assistant"]] + """The role of the messages author, in this case `assistant`.""" + + content: Union[str, List[str], None] + """The contents of the assistant message.""" + + +Message: TypeAlias = Union[ + MessageChatCompletionRequestSystemMessage, + MessageChatCompletionRequestDeveloperMessage, + MessageChatCompletionRequestUserMessage, 
+ MessageChatCompletionRequestAssistantMessage, +] + + +class StreamOptions(TypedDict, total=False): + include_usage: bool + """If set, an additional chunk will be streamed before the `data: [DONE]` message. + + The `usage` field on this chunk shows the token usage statistics for the entire + request, and the `choices` field will always be an empty array. + + All other chunks will also include a `usage` field, but with a null value. + **NOTE:** If the stream is interrupted, you may not receive the final usage + chunk which contains the total token usage for the request. + """ diff --git a/src/gradientai/types/chat/completion_create_response.py b/src/gradientai/types/chat/completion_create_response.py new file mode 100644 index 00000000..5a25ac7c --- /dev/null +++ b/src/gradientai/types/chat/completion_create_response.py @@ -0,0 +1,190 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = [ + "CompletionCreateResponse", + "Choice", + "ChoiceLogprobs", + "ChoiceLogprobsContent", + "ChoiceLogprobsContentTopLogprob", + "ChoiceLogprobsRefusal", + "ChoiceLogprobsRefusalTopLogprob", + "ChoiceMessage", + "Usage", +] + + +class ChoiceLogprobsContentTopLogprob(BaseModel): + token: str + """The token.""" + + bytes: Optional[List[int]] = None + """A list of integers representing the UTF-8 bytes representation of the token. + + Useful in instances where characters are represented by multiple tokens and + their byte representations must be combined to generate the correct text + representation. Can be `null` if there is no bytes representation for the token. + """ + + logprob: float + """The log probability of this token, if it is within the top 20 most likely + tokens. + + Otherwise, the value `-9999.0` is used to signify that the token is very + unlikely. 
+ """ + + +class ChoiceLogprobsContent(BaseModel): + token: str + """The token.""" + + bytes: Optional[List[int]] = None + """A list of integers representing the UTF-8 bytes representation of the token. + + Useful in instances where characters are represented by multiple tokens and + their byte representations must be combined to generate the correct text + representation. Can be `null` if there is no bytes representation for the token. + """ + + logprob: float + """The log probability of this token, if it is within the top 20 most likely + tokens. + + Otherwise, the value `-9999.0` is used to signify that the token is very + unlikely. + """ + + top_logprobs: List[ChoiceLogprobsContentTopLogprob] + """List of the most likely tokens and their log probability, at this token + position. + + In rare cases, there may be fewer than the number of requested `top_logprobs` + returned. + """ + + +class ChoiceLogprobsRefusalTopLogprob(BaseModel): + token: str + """The token.""" + + bytes: Optional[List[int]] = None + """A list of integers representing the UTF-8 bytes representation of the token. + + Useful in instances where characters are represented by multiple tokens and + their byte representations must be combined to generate the correct text + representation. Can be `null` if there is no bytes representation for the token. + """ + + logprob: float + """The log probability of this token, if it is within the top 20 most likely + tokens. + + Otherwise, the value `-9999.0` is used to signify that the token is very + unlikely. + """ + + +class ChoiceLogprobsRefusal(BaseModel): + token: str + """The token.""" + + bytes: Optional[List[int]] = None + """A list of integers representing the UTF-8 bytes representation of the token. + + Useful in instances where characters are represented by multiple tokens and + their byte representations must be combined to generate the correct text + representation. Can be `null` if there is no bytes representation for the token. 
+ """ + + logprob: float + """The log probability of this token, if it is within the top 20 most likely + tokens. + + Otherwise, the value `-9999.0` is used to signify that the token is very + unlikely. + """ + + top_logprobs: List[ChoiceLogprobsRefusalTopLogprob] + """List of the most likely tokens and their log probability, at this token + position. + + In rare cases, there may be fewer than the number of requested `top_logprobs` + returned. + """ + + +class ChoiceLogprobs(BaseModel): + content: Optional[List[ChoiceLogprobsContent]] = None + """A list of message content tokens with log probability information.""" + + refusal: Optional[List[ChoiceLogprobsRefusal]] = None + """A list of message refusal tokens with log probability information.""" + + +class ChoiceMessage(BaseModel): + content: Optional[str] = None + """The contents of the message.""" + + refusal: Optional[str] = None + """The refusal message generated by the model.""" + + role: Literal["assistant"] + """The role of the author of this message.""" + + +class Choice(BaseModel): + finish_reason: Literal["stop", "length"] + """The reason the model stopped generating tokens. + + This will be `stop` if the model hit a natural stop point or a provided stop + sequence, or `length` if the maximum number of tokens specified in the request + was reached. 
+ """ + + index: int + """The index of the choice in the list of choices.""" + + logprobs: Optional[ChoiceLogprobs] = None + """Log probability information for the choice.""" + + message: ChoiceMessage + """A chat completion message generated by the model.""" + + +class Usage(BaseModel): + completion_tokens: int + """Number of tokens in the generated completion.""" + + prompt_tokens: int + """Number of tokens in the prompt.""" + + total_tokens: int + """Total number of tokens used in the request (prompt + completion).""" + + +class CompletionCreateResponse(BaseModel): + id: str + """A unique identifier for the chat completion.""" + + choices: List[Choice] + """A list of chat completion choices. + + Can be more than one if `n` is greater than 1. + """ + + created: int + """The Unix timestamp (in seconds) of when the chat completion was created.""" + + model: str + """The model used for the chat completion.""" + + object: Literal["chat.completion"] + """The object type, which is always `chat.completion`.""" + + usage: Optional[Usage] = None + """Usage statistics for the completion request.""" diff --git a/src/gradientai/types/auth/__init__.py b/tests/api_resources/chat/__init__.py similarity index 70% rename from src/gradientai/types/auth/__init__.py rename to tests/api_resources/chat/__init__.py index f8ee8b14..fd8019a9 100644 --- a/src/gradientai/types/auth/__init__.py +++ b/tests/api_resources/chat/__init__.py @@ -1,3 +1 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py new file mode 100644 index 00000000..17319d86 --- /dev/null +++ b/tests/api_resources/chat/test_completions.py @@ -0,0 +1,184 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.chat import CompletionCreateResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestCompletions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + completion = client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + completion = client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + frequency_penalty=-2, + logit_bias={"foo": 0}, + logprobs=True, + max_completion_tokens=256, + max_tokens=0, + metadata={"foo": "string"}, + n=1, + presence_penalty=-2, + stop="\n", + stream=True, + stream_options={"include_usage": True}, + temperature=1, + top_logprobs=0, + top_p=1, + user="user-1234", + ) + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = response.parse() + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_streaming_response_create(self, client: GradientAI) -> None: + with client.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = response.parse() + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncCompletions: + parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + completion = await async_client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + completion = await async_client.chat.completions.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + frequency_penalty=-2, + logit_bias={"foo": 0}, + logprobs=True, + max_completion_tokens=256, + max_tokens=0, + metadata={"foo": "string"}, + n=1, + presence_penalty=-2, + stop="\n", + stream=True, + stream_options={"include_usage": True}, + temperature=1, + top_logprobs=0, + top_p=1, + user="user-1234", + ) + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.chat.completions.with_raw_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) + + 
assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + completion = await response.parse() + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.chat.completions.with_streaming_response.create( + messages=[ + { + "content": "string", + "role": "system", + } + ], + model="llama3-8b-instruct", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + completion = await response.parse() + assert_matches_type(CompletionCreateResponse, completion, path=["response"]) + + assert cast(Any, response.is_closed) is True From aa2610afe7da79429e05bff64b4796de7f525681 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 18 Jun 2025 23:07:22 +0000 Subject: [PATCH 21/41] feat(api): update via SDK Studio --- .stats.yml | 4 +- api.md | 25 +- src/gradientai/_client.py | 2 +- .../{api_keys/api_keys_.py => api_keys.py} | 24 +- src/gradientai/resources/api_keys/__init__.py | 19 - src/gradientai/resources/api_keys/api_keys.py | 279 ----------- src/gradientai/resources/models.py | 85 ---- src/gradientai/types/__init__.py | 10 +- src/gradientai/types/api_agreement.py | 17 - .../{api_keys => }/api_key_create_params.py | 0 .../{api_keys => }/api_key_create_response.py | 2 +- .../{api_keys => }/api_key_delete_response.py | 2 +- src/gradientai/types/api_key_list_params.py | 29 +- src/gradientai/types/api_key_list_response.py | 32 +- .../{api_keys => }/api_key_update_params.py | 2 +- .../api_key_update_regenerate_response.py | 2 +- .../{api_keys => }/api_key_update_response.py | 2 +- src/gradientai/types/api_keys/__init__.py | 13 - .../types/api_keys/api_key_list_params.py | 15 - .../types/api_keys/api_key_list_response.py | 18 - 
src/gradientai/types/api_model.py | 26 +- .../{api_keys => }/api_model_api_key_info.py | 2 +- src/gradientai/types/api_model_version.py | 15 - src/gradientai/types/model.py | 21 - src/gradientai/types/model_list_response.py | 26 +- tests/api_resources/api_keys/__init__.py | 1 - .../api_resources/api_keys/test_api_keys_.py | 446 ------------------ tests/api_resources/test_api_keys.py | 356 +++++++++++++- tests/api_resources/test_models.py | 86 +--- 29 files changed, 433 insertions(+), 1128 deletions(-) rename src/gradientai/resources/{api_keys/api_keys_.py => api_keys.py} (96%) delete mode 100644 src/gradientai/resources/api_keys/__init__.py delete mode 100644 src/gradientai/resources/api_keys/api_keys.py delete mode 100644 src/gradientai/types/api_agreement.py rename src/gradientai/types/{api_keys => }/api_key_create_params.py (100%) rename src/gradientai/types/{api_keys => }/api_key_create_response.py (90%) rename src/gradientai/types/{api_keys => }/api_key_delete_response.py (90%) rename src/gradientai/types/{api_keys => }/api_key_update_params.py (90%) rename src/gradientai/types/{api_keys => }/api_key_update_regenerate_response.py (90%) rename src/gradientai/types/{api_keys => }/api_key_update_response.py (90%) delete mode 100644 src/gradientai/types/api_keys/__init__.py delete mode 100644 src/gradientai/types/api_keys/api_key_list_params.py delete mode 100644 src/gradientai/types/api_keys/api_key_list_response.py rename src/gradientai/types/{api_keys => }/api_model_api_key_info.py (93%) delete mode 100644 src/gradientai/types/api_model_version.py delete mode 100644 src/gradientai/types/model.py delete mode 100644 tests/api_resources/api_keys/__init__.py delete mode 100644 tests/api_resources/api_keys/test_api_keys_.py diff --git a/.stats.yml b/.stats.yml index bd458f47..01cc76ec 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 57 +configured_endpoints: 56 openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: c880014064b4d19b42254d47f1bb2758 +config_hash: 6abb2ff94db8b1b61321606275ba3e80 diff --git a/api.md b/api.md index ed9acf78..bac163c8 100644 --- a/api.md +++ b/api.md @@ -255,19 +255,7 @@ Methods: Types: ```python -from gradientai.types import APIAgreement, APIModelVersion, APIKeyListResponse -``` - -Methods: - -- client.api_keys.list(\*\*params) -> APIKeyListResponse - -## APIKeys - -Types: - -```python -from gradientai.types.api_keys import ( +from gradientai.types import ( APIModelAPIKeyInfo, APIKeyCreateResponse, APIKeyUpdateResponse, @@ -279,11 +267,11 @@ from gradientai.types.api_keys import ( Methods: -- client.api*keys.api_keys.create(\*\*params) -> APIKeyCreateResponse -- client.api*keys.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse -- client.api*keys.api_keys.list(\*\*params) -> APIKeyListResponse -- client.api*keys.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse -- client.api*keys.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse +- client.api_keys.create(\*\*params) -> APIKeyCreateResponse +- client.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse +- client.api_keys.list(\*\*params) -> APIKeyListResponse +- client.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse +- client.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse # Chat @@ -309,5 +297,4 @@ from gradientai.types import Model, ModelListResponse Methods: -- client.models.retrieve(model) -> Model - client.models.list(\*\*params) -> ModelListResponse diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 6927ff10..bec52a23 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -34,10 +34,10 @@ from .resources import chat, agents, 
models, regions, api_keys, providers, indexing_jobs, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource from .resources.regions import RegionsResource, AsyncRegionsResource + from .resources.api_keys import APIKeysResource, AsyncAPIKeysResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource - from .resources.api_keys.api_keys import APIKeysResource, AsyncAPIKeysResource from .resources.providers.providers import ProvidersResource, AsyncProvidersResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource diff --git a/src/gradientai/resources/api_keys/api_keys_.py b/src/gradientai/resources/api_keys.py similarity index 96% rename from src/gradientai/resources/api_keys/api_keys_.py rename to src/gradientai/resources/api_keys.py index 7bea219b..be1e346b 100644 --- a/src/gradientai/resources/api_keys/api_keys_.py +++ b/src/gradientai/resources/api_keys.py @@ -4,23 +4,23 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( +from ..types import api_key_list_params, api_key_create_params, api_key_update_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ..._base_client import make_request_options -from ...types.api_keys import api_key_list_params, api_key_create_params, 
api_key_update_params -from ...types.api_keys.api_key_list_response import APIKeyListResponse -from ...types.api_keys.api_key_create_response import APIKeyCreateResponse -from ...types.api_keys.api_key_delete_response import APIKeyDeleteResponse -from ...types.api_keys.api_key_update_response import APIKeyUpdateResponse -from ...types.api_keys.api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse +from .._base_client import make_request_options +from ..types.api_key_list_response import APIKeyListResponse +from ..types.api_key_create_response import APIKeyCreateResponse +from ..types.api_key_delete_response import APIKeyDeleteResponse +from ..types.api_key_update_response import APIKeyUpdateResponse +from ..types.api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse __all__ = ["APIKeysResource", "AsyncAPIKeysResource"] diff --git a/src/gradientai/resources/api_keys/__init__.py b/src/gradientai/resources/api_keys/__init__.py deleted file mode 100644 index ed14565c..00000000 --- a/src/gradientai/resources/api_keys/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .api_keys import ( - APIKeysResource, - AsyncAPIKeysResource, - APIKeysResourceWithRawResponse, - AsyncAPIKeysResourceWithRawResponse, - APIKeysResourceWithStreamingResponse, - AsyncAPIKeysResourceWithStreamingResponse, -) - -__all__ = [ - "APIKeysResource", - "AsyncAPIKeysResource", - "APIKeysResourceWithRawResponse", - "AsyncAPIKeysResourceWithRawResponse", - "APIKeysResourceWithStreamingResponse", - "AsyncAPIKeysResourceWithStreamingResponse", -] diff --git a/src/gradientai/resources/api_keys/api_keys.py b/src/gradientai/resources/api_keys/api_keys.py deleted file mode 100644 index ee94a02d..00000000 --- a/src/gradientai/resources/api_keys/api_keys.py +++ /dev/null @@ -1,279 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import List -from typing_extensions import Literal - -import httpx - -from . import api_keys_ as api_keys -from ...types import api_key_list_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.api_key_list_response import APIKeyListResponse - -__all__ = ["APIKeysResource", "AsyncAPIKeysResource"] - - -class APIKeysResource(SyncAPIResource): - @cached_property - def api_keys(self) -> api_keys.APIKeysResource: - return api_keys.APIKeysResource(self._client) - - @cached_property - def with_raw_response(self) -> APIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return APIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return APIKeysResourceWithStreamingResponse(self) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyListResponse: - """ - To list all models, send a GET request to `/v2/gen-ai/models`. - - Args: - page: page number. - - per_page: items per page. - - public_only: only include models that are publicly available. - - usecases: include only models defined for the listed usecases. 
- - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/gen-ai/models" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/models", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - "public_only": public_only, - "usecases": usecases, - }, - api_key_list_params.APIKeyListParams, - ), - ), - cast_to=APIKeyListResponse, - ) - - -class AsyncAPIKeysResource(AsyncAPIResource): - @cached_property - def api_keys(self) -> api_keys.AsyncAPIKeysResource: - return api_keys.AsyncAPIKeysResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAPIKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response - """ - return AsyncAPIKeysResourceWithStreamingResponse(self) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> APIKeyListResponse: - """ - To list all models, send a GET request to `/v2/gen-ai/models`. - - Args: - page: page number. - - per_page: items per page. - - public_only: only include models that are publicly available. - - usecases: include only models defined for the listed usecases. 
- - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/gen-ai/models" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/models", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - "public_only": public_only, - "usecases": usecases, - }, - api_key_list_params.APIKeyListParams, - ), - ), - cast_to=APIKeyListResponse, - ) - - -class APIKeysResourceWithRawResponse: - def __init__(self, api_keys: APIKeysResource) -> None: - self._api_keys = api_keys - - self.list = to_raw_response_wrapper( - api_keys.list, - ) - - @cached_property - def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: - return api_keys.APIKeysResourceWithRawResponse(self._api_keys.api_keys) - - -class AsyncAPIKeysResourceWithRawResponse: - def __init__(self, api_keys: AsyncAPIKeysResource) -> None: - self._api_keys = api_keys - - self.list = async_to_raw_response_wrapper( - api_keys.list, - ) - - @cached_property - def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithRawResponse: - return api_keys.AsyncAPIKeysResourceWithRawResponse(self._api_keys.api_keys) - - -class 
APIKeysResourceWithStreamingResponse: - def __init__(self, api_keys: APIKeysResource) -> None: - self._api_keys = api_keys - - self.list = to_streamed_response_wrapper( - api_keys.list, - ) - - @cached_property - def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: - return api_keys.APIKeysResourceWithStreamingResponse(self._api_keys.api_keys) - - -class AsyncAPIKeysResourceWithStreamingResponse: - def __init__(self, api_keys: AsyncAPIKeysResource) -> None: - self._api_keys = api_keys - - self.list = async_to_streamed_response_wrapper( - api_keys.list, - ) - - @cached_property - def api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: - return api_keys.AsyncAPIKeysResourceWithStreamingResponse(self._api_keys.api_keys) diff --git a/src/gradientai/resources/models.py b/src/gradientai/resources/models.py index d8b6b385..2c7b40ab 100644 --- a/src/gradientai/resources/models.py +++ b/src/gradientai/resources/models.py @@ -18,7 +18,6 @@ async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ..types.model import Model from .._base_client import make_request_options from ..types.model_list_response import ModelListResponse @@ -45,42 +44,6 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ return ModelsResourceWithStreamingResponse(self) - def retrieve( - self, - model: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Model: - """ - Retrieves a model instance, providing basic information about the model such as - the owner and permissioning. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not model: - raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") - return self._get( - f"/models/{model}" - if self._client._base_url_overridden - else f"https://inference.do-ai.run/v1/models/{model}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Model, - ) - def list( self, *, @@ -178,42 +141,6 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ return AsyncModelsResourceWithStreamingResponse(self) - async def retrieve( - self, - model: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Model: - """ - Retrieves a model instance, providing basic information about the model such as - the owner and permissioning. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not model: - raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") - return await self._get( - f"/models/{model}" - if self._client._base_url_overridden - else f"https://inference.do-ai.run/v1/models/{model}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Model, - ) - async def list( self, *, @@ -295,9 +222,6 @@ class ModelsResourceWithRawResponse: def __init__(self, models: ModelsResource) -> None: self._models = models - self.retrieve = to_raw_response_wrapper( - models.retrieve, - ) self.list = to_raw_response_wrapper( models.list, ) @@ -307,9 +231,6 @@ class AsyncModelsResourceWithRawResponse: def __init__(self, models: AsyncModelsResource) -> None: self._models = models - self.retrieve = async_to_raw_response_wrapper( - models.retrieve, - ) self.list = async_to_raw_response_wrapper( models.list, ) @@ -319,9 +240,6 @@ class ModelsResourceWithStreamingResponse: def __init__(self, models: ModelsResource) -> None: self._models = models - self.retrieve = to_streamed_response_wrapper( - models.retrieve, - ) self.list = to_streamed_response_wrapper( models.list, ) @@ -331,9 +249,6 @@ class AsyncModelsResourceWithStreamingResponse: def __init__(self, models: AsyncModelsResource) -> None: self._models = models - self.retrieve = async_to_streamed_response_wrapper( - models.retrieve, - ) self.list = async_to_streamed_response_wrapper( models.list, ) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index 4b12d65c..ddbbe52d 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -2,13 +2,10 @@ from __future__ import 
annotations -from .model import Model as Model from .api_agent import APIAgent as APIAgent from .api_model import APIModel as APIModel -from .api_agreement import APIAgreement as APIAgreement from .api_indexing_job import APIIndexingJob as APIIndexingJob from .agent_list_params import AgentListParams as AgentListParams -from .api_model_version import APIModelVersion as APIModelVersion from .model_list_params import ModelListParams as ModelListParams from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase from .region_list_params import RegionListParams as RegionListParams @@ -22,9 +19,15 @@ from .agent_create_response import AgentCreateResponse as AgentCreateResponse from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse +from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams from .api_key_list_response import APIKeyListResponse as APIKeyListResponse +from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo +from .api_model_api_key_info import APIModelAPIKeyInfo as APIModelAPIKeyInfo from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse +from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse +from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse +from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility @@ -44,6 +47,7 @@ from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse from .knowledge_base_retrieve_response import 
KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams +from .api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse as APIKeyUpdateRegenerateResponse from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse from .indexing_job_retrieve_data_sources_response import ( IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, diff --git a/src/gradientai/types/api_agreement.py b/src/gradientai/types/api_agreement.py deleted file mode 100644 index c4359f1f..00000000 --- a/src/gradientai/types/api_agreement.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["APIAgreement"] - - -class APIAgreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None diff --git a/src/gradientai/types/api_keys/api_key_create_params.py b/src/gradientai/types/api_key_create_params.py similarity index 100% rename from src/gradientai/types/api_keys/api_key_create_params.py rename to src/gradientai/types/api_key_create_params.py diff --git a/src/gradientai/types/api_keys/api_key_create_response.py b/src/gradientai/types/api_key_create_response.py similarity index 90% rename from src/gradientai/types/api_keys/api_key_create_response.py rename to src/gradientai/types/api_key_create_response.py index 654e9f1e..2d6024cf 100644 --- a/src/gradientai/types/api_keys/api_key_create_response.py +++ b/src/gradientai/types/api_key_create_response.py @@ -2,7 +2,7 @@ from typing import Optional -from ..._models import BaseModel +from .._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyCreateResponse"] diff --git 
a/src/gradientai/types/api_keys/api_key_delete_response.py b/src/gradientai/types/api_key_delete_response.py similarity index 90% rename from src/gradientai/types/api_keys/api_key_delete_response.py rename to src/gradientai/types/api_key_delete_response.py index 4d81d047..d65286c8 100644 --- a/src/gradientai/types/api_keys/api_key_delete_response.py +++ b/src/gradientai/types/api_key_delete_response.py @@ -2,7 +2,7 @@ from typing import Optional -from ..._models import BaseModel +from .._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyDeleteResponse"] diff --git a/src/gradientai/types/api_key_list_params.py b/src/gradientai/types/api_key_list_params.py index a1ab60dc..11da9398 100644 --- a/src/gradientai/types/api_key_list_params.py +++ b/src/gradientai/types/api_key_list_params.py @@ -2,8 +2,7 @@ from __future__ import annotations -from typing import List -from typing_extensions import Literal, TypedDict +from typing_extensions import TypedDict __all__ = ["APIKeyListParams"] @@ -14,29 +13,3 @@ class APIKeyListParams(TypedDict, total=False): per_page: int """items per page.""" - - public_only: bool - """only include models that are publicly available.""" - - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - """include only models defined for the listed usecases. 
- - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - """ diff --git a/src/gradientai/types/api_key_list_response.py b/src/gradientai/types/api_key_list_response.py index 360de7a4..db45102b 100644 --- a/src/gradientai/types/api_key_list_response.py +++ b/src/gradientai/types/api_key_list_response.py @@ -1,42 +1,18 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import List, Optional -from datetime import datetime from .._models import BaseModel -from .api_agreement import APIAgreement from .agents.api_meta import APIMeta from .agents.api_links import APILinks -from .api_model_version import APIModelVersion +from .api_model_api_key_info import APIModelAPIKeyInfo -__all__ = ["APIKeyListResponse", "Model"] - - -class Model(BaseModel): - agreement: Optional[APIAgreement] = None - - created_at: Optional[datetime] = None - - is_foundational: Optional[bool] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - version: Optional[APIModelVersion] = None +__all__ = ["APIKeyListResponse"] class APIKeyListResponse(BaseModel): + api_key_infos: Optional[List[APIModelAPIKeyInfo]] = None + links: Optional[APILinks] = None meta: Optional[APIMeta] = None - - models: Optional[List[Model]] = None diff --git a/src/gradientai/types/api_keys/api_key_update_params.py b/src/gradientai/types/api_key_update_params.py similarity index 90% rename 
from src/gradientai/types/api_keys/api_key_update_params.py rename to src/gradientai/types/api_key_update_params.py index 23c1c0b9..1678304f 100644 --- a/src/gradientai/types/api_keys/api_key_update_params.py +++ b/src/gradientai/types/api_key_update_params.py @@ -4,7 +4,7 @@ from typing_extensions import Annotated, TypedDict -from ..._utils import PropertyInfo +from .._utils import PropertyInfo __all__ = ["APIKeyUpdateParams"] diff --git a/src/gradientai/types/api_keys/api_key_update_regenerate_response.py b/src/gradientai/types/api_key_update_regenerate_response.py similarity index 90% rename from src/gradientai/types/api_keys/api_key_update_regenerate_response.py rename to src/gradientai/types/api_key_update_regenerate_response.py index 44a316dc..eaf19b6e 100644 --- a/src/gradientai/types/api_keys/api_key_update_regenerate_response.py +++ b/src/gradientai/types/api_key_update_regenerate_response.py @@ -2,7 +2,7 @@ from typing import Optional -from ..._models import BaseModel +from .._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyUpdateRegenerateResponse"] diff --git a/src/gradientai/types/api_keys/api_key_update_response.py b/src/gradientai/types/api_key_update_response.py similarity index 90% rename from src/gradientai/types/api_keys/api_key_update_response.py rename to src/gradientai/types/api_key_update_response.py index 3671addf..a8d79898 100644 --- a/src/gradientai/types/api_keys/api_key_update_response.py +++ b/src/gradientai/types/api_key_update_response.py @@ -2,7 +2,7 @@ from typing import Optional -from ..._models import BaseModel +from .._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyUpdateResponse"] diff --git a/src/gradientai/types/api_keys/__init__.py b/src/gradientai/types/api_keys/__init__.py deleted file mode 100644 index c3cbcd6d..00000000 --- a/src/gradientai/types/api_keys/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our 
OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .api_key_list_params import APIKeyListParams as APIKeyListParams -from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams -from .api_key_list_response import APIKeyListResponse as APIKeyListResponse -from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams -from .api_model_api_key_info import APIModelAPIKeyInfo as APIModelAPIKeyInfo -from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse -from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse -from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse -from .api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse as APIKeyUpdateRegenerateResponse diff --git a/src/gradientai/types/api_keys/api_key_list_params.py b/src/gradientai/types/api_keys/api_key_list_params.py deleted file mode 100644 index 11da9398..00000000 --- a/src/gradientai/types/api_keys/api_key_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["APIKeyListParams"] - - -class APIKeyListParams(TypedDict, total=False): - page: int - """page number.""" - - per_page: int - """items per page.""" diff --git a/src/gradientai/types/api_keys/api_key_list_response.py b/src/gradientai/types/api_keys/api_key_list_response.py deleted file mode 100644 index 535e2f96..00000000 --- a/src/gradientai/types/api_keys/api_key_list_response.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from ..._models import BaseModel -from ..agents.api_meta import APIMeta -from ..agents.api_links import APILinks -from .api_model_api_key_info import APIModelAPIKeyInfo - -__all__ = ["APIKeyListResponse"] - - -class APIKeyListResponse(BaseModel): - api_key_infos: Optional[List[APIModelAPIKeyInfo]] = None - - links: Optional[APILinks] = None - - meta: Optional[APIMeta] = None diff --git a/src/gradientai/types/api_model.py b/src/gradientai/types/api_model.py index d680a638..82120454 100644 --- a/src/gradientai/types/api_model.py +++ b/src/gradientai/types/api_model.py @@ -5,14 +5,30 @@ from typing_extensions import Literal from .._models import BaseModel -from .api_agreement import APIAgreement -from .api_model_version import APIModelVersion -__all__ = ["APIModel"] +__all__ = ["APIModel", "Agreement", "Version"] + + +class Agreement(BaseModel): + description: Optional[str] = None + + name: Optional[str] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + +class Version(BaseModel): + major: Optional[int] = None + + minor: Optional[int] = None + + patch: Optional[int] = None class APIModel(BaseModel): - agreement: Optional[APIAgreement] = None + agreement: Optional[Agreement] = None created_at: Optional[datetime] = None @@ -54,4 +70,4 @@ class APIModel(BaseModel): uuid: Optional[str] = None - version: Optional[APIModelVersion] = None + version: Optional[Version] = None diff --git a/src/gradientai/types/api_keys/api_model_api_key_info.py b/src/gradientai/types/api_model_api_key_info.py similarity index 93% rename from src/gradientai/types/api_keys/api_model_api_key_info.py rename to src/gradientai/types/api_model_api_key_info.py index bf354a47..c05c9cef 100644 --- a/src/gradientai/types/api_keys/api_model_api_key_info.py +++ b/src/gradientai/types/api_model_api_key_info.py @@ -3,7 +3,7 @@ from typing import Optional from datetime import datetime -from ..._models import BaseModel +from .._models import BaseModel 
__all__ = ["APIModelAPIKeyInfo"] diff --git a/src/gradientai/types/api_model_version.py b/src/gradientai/types/api_model_version.py deleted file mode 100644 index 2e118632..00000000 --- a/src/gradientai/types/api_model_version.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["APIModelVersion"] - - -class APIModelVersion(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None diff --git a/src/gradientai/types/model.py b/src/gradientai/types/model.py deleted file mode 100644 index 2631ee8d..00000000 --- a/src/gradientai/types/model.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["Model"] - - -class Model(BaseModel): - id: str - """The model identifier, which can be referenced in the API endpoints.""" - - created: int - """The Unix timestamp (in seconds) when the model was created.""" - - object: Literal["model"] - """The object type, which is always "model".""" - - owned_by: str - """The organization that owns the model.""" diff --git a/src/gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py index 1d0e5eee..29d6a34e 100644 --- a/src/gradientai/types/model_list_response.py +++ b/src/gradientai/types/model_list_response.py @@ -4,16 +4,32 @@ from datetime import datetime from .._models import BaseModel -from .api_agreement import APIAgreement from .agents.api_meta import APIMeta from .agents.api_links import APILinks -from .api_model_version import APIModelVersion -__all__ = ["ModelListResponse", "Model"] +__all__ = ["ModelListResponse", "Model", "ModelAgreement", "ModelVersion"] + + +class ModelAgreement(BaseModel): + description: Optional[str] = None + + name: Optional[str] = 
None + + url: Optional[str] = None + + uuid: Optional[str] = None + + +class ModelVersion(BaseModel): + major: Optional[int] = None + + minor: Optional[int] = None + + patch: Optional[int] = None class Model(BaseModel): - agreement: Optional[APIAgreement] = None + agreement: Optional[ModelAgreement] = None created_at: Optional[datetime] = None @@ -31,7 +47,7 @@ class Model(BaseModel): uuid: Optional[str] = None - version: Optional[APIModelVersion] = None + version: Optional[ModelVersion] = None class ModelListResponse(BaseModel): diff --git a/tests/api_resources/api_keys/__init__.py b/tests/api_resources/api_keys/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/api_keys/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/api_keys/test_api_keys_.py b/tests/api_resources/api_keys/test_api_keys_.py deleted file mode 100644 index 01e8dcfa..00000000 --- a/tests/api_resources/api_keys/test_api_keys_.py +++ /dev/null @@ -1,446 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types.api_keys import ( - APIKeyListResponse, - APIKeyCreateResponse, - APIKeyDeleteResponse, - APIKeyUpdateResponse, - APIKeyUpdateRegenerateResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestAPIKeys: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: GradientAI) -> None: - api_key = client.api_keys.api_keys.create() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: - api_key = client.api_keys.api_keys.create( - name="name", - ) - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: - response = client.api_keys.api_keys.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: - with client.api_keys.api_keys.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: GradientAI) -> None: - api_key = 
client.api_keys.api_keys.update( - path_api_key_uuid="api_key_uuid", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: - api_key = client.api_keys.api_keys.update( - path_api_key_uuid="api_key_uuid", - body_api_key_uuid="api_key_uuid", - name="name", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: - response = client.api_keys.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: - with client.api_keys.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.api_keys.api_keys.with_raw_response.update( - path_api_key_uuid="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - api_key = client.api_keys.api_keys.list() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - 
api_key = client.api_keys.api_keys.list( - page=0, - per_page=0, - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.api_keys.api_keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.api_keys.api_keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: GradientAI) -> None: - api_key = client.api_keys.api_keys.delete( - "api_key_uuid", - ) - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.api_keys.api_keys.with_raw_response.delete( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.api_keys.api_keys.with_streaming_response.delete( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyDeleteResponse, 
api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.api_keys.api_keys.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update_regenerate(self, client: GradientAI) -> None: - api_key = client.api_keys.api_keys.update_regenerate( - "api_key_uuid", - ) - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update_regenerate(self, client: GradientAI) -> None: - response = client.api_keys.api_keys.with_raw_response.update_regenerate( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = response.parse() - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update_regenerate(self, client: GradientAI) -> None: - with client.api_keys.api_keys.with_streaming_response.update_regenerate( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = response.parse() - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update_regenerate(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.api_keys.api_keys.with_raw_response.update_regenerate( - "", - ) - - -class TestAsyncAPIKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - - 
@pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.api_keys.create() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.api_keys.create( - name="name", - ) - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.api_keys.update( - path_api_key_uuid="api_key_uuid", - ) - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.api_keys.update( - path_api_key_uuid="api_key_uuid", - body_api_key_uuid="api_key_uuid", - name="name", - ) - 
assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.api_keys.api_keys.with_raw_response.update( - path_api_key_uuid="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.api_keys.list() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.api_keys.list( - page=0, - per_page=0, - ) - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: 
AsyncGradientAI) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyListResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.api_keys.delete( - "api_key_uuid", - ) - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.delete( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.delete( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) 
is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.api_keys.api_keys.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update_regenerate(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.api_keys.update_regenerate( - "api_key_uuid", - ) - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.api_keys.with_raw_response.update_regenerate( - "api_key_uuid", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - api_key = await response.parse() - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.api_keys.with_streaming_response.update_regenerate( - "api_key_uuid", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - api_key = await response.parse() - assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update_regenerate(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.api_keys.api_keys.with_raw_response.update_regenerate( - "", - ) diff --git a/tests/api_resources/test_api_keys.py 
b/tests/api_resources/test_api_keys.py index fa1895c9..a06e0b3a 100644 --- a/tests/api_resources/test_api_keys.py +++ b/tests/api_resources/test_api_keys.py @@ -9,7 +9,13 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import APIKeyListResponse +from gradientai.types import ( + APIKeyListResponse, + APIKeyCreateResponse, + APIKeyDeleteResponse, + APIKeyUpdateResponse, + APIKeyUpdateRegenerateResponse, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -17,6 +23,94 @@ class TestAPIKeys: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + api_key = client.api_keys.create() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + api_key = client.api_keys.create( + name="name", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.api_keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.api_keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + 
def test_method_update(self, client: GradientAI) -> None: + api_key = client.api_keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + api_key = client.api_keys.update( + path_api_key_uuid="api_key_uuid", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.api_keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + client.api_keys.with_raw_response.update( + path_api_key_uuid="", + ) + @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: @@ -29,8 +123,6 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: api_key = client.api_keys.list( page=0, per_page=0, - public_only=True, - usecases=["MODEL_USECASE_UNKNOWN"], ) 
assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @@ -56,10 +148,182 @@ def test_streaming_response_list(self, client: GradientAI) -> None: assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + api_key = client.api_keys.delete( + "api_key_uuid", + ) + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.api_keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.api_keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.api_keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update_regenerate(self, client: GradientAI) -> None: + api_key = client.api_keys.update_regenerate( + "api_key_uuid", + ) + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update_regenerate(self, client: GradientAI) -> None: + response = client.api_keys.with_raw_response.update_regenerate( + 
"api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update_regenerate(self, client: GradientAI) -> None: + with client.api_keys.with_streaming_response.update_regenerate( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update_regenerate(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + client.api_keys.with_raw_response.update_regenerate( + "", + ) + class TestAsyncAPIKeys: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.create() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.create( + name="name", + ) + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await 
response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.update( + path_api_key_uuid="api_key_uuid", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.update( + path_api_key_uuid="api_key_uuid", + body_api_key_uuid="api_key_uuid", + name="name", + ) + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.with_raw_response.update( + path_api_key_uuid="api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.with_streaming_response.update( + path_api_key_uuid="api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = 
await response.parse() + assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): + await async_client.api_keys.with_raw_response.update( + path_api_key_uuid="", + ) + @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: @@ -72,8 +336,6 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) api_key = await async_client.api_keys.list( page=0, per_page=0, - public_only=True, - usecases=["MODEL_USECASE_UNKNOWN"], ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @@ -98,3 +360,87 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N assert_matches_type(APIKeyListResponse, api_key, path=["response"]) assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.delete( + "api_key_uuid", + ) + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.with_raw_response.delete( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.with_streaming_response.delete( + "api_key_uuid", + ) as response: + 
assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.api_keys.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update_regenerate(self, async_client: AsyncGradientAI) -> None: + api_key = await async_client.api_keys.update_regenerate( + "api_key_uuid", + ) + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: + response = await async_client.api_keys.with_raw_response.update_regenerate( + "api_key_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + api_key = await response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: + async with async_client.api_keys.with_streaming_response.update_regenerate( + "api_key_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + api_key = await response.parse() + assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update_regenerate(self, async_client: AsyncGradientAI) -> None: + with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): + await async_client.api_keys.with_raw_response.update_regenerate( + "", + ) diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index 04133ed4..946b2eb9 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import Model, ModelListResponse +from gradientai.types import ModelListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -17,48 +17,6 @@ class TestModels: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - model = client.models.retrieve( - "llama3-8b-instruct", - ) - assert_matches_type(Model, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.models.with_raw_response.retrieve( - "llama3-8b-instruct", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = response.parse() - assert_matches_type(Model, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.models.with_streaming_response.retrieve( - "llama3-8b-instruct", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - model = response.parse() - assert_matches_type(Model, model, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: - with pytest.raises(ValueError, 
match=r"Expected a non-empty value for `model` but received ''"): - client.models.with_raw_response.retrieve( - "", - ) - @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: @@ -102,48 +60,6 @@ def test_streaming_response_list(self, client: GradientAI) -> None: class TestAsyncModels: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - model = await async_client.models.retrieve( - "llama3-8b-instruct", - ) - assert_matches_type(Model, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.models.with_raw_response.retrieve( - "llama3-8b-instruct", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = await response.parse() - assert_matches_type(Model, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.models.with_streaming_response.retrieve( - "llama3-8b-instruct", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - model = await response.parse() - assert_matches_type(Model, model, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): - await async_client.models.with_raw_response.retrieve( - "", - ) - @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: From 
5d452d7245af6c80f47f8395f1c03493dfb53a52 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 19 Jun 2025 02:56:29 +0000 Subject: [PATCH 22/41] docs(client): fix httpx.Timeout documentation reference --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 465a7a0c..36edcfbd 100644 --- a/README.md +++ b/README.md @@ -166,7 +166,7 @@ client.with_options(max_retries=5).agents.versions.list( ### Timeouts By default requests time out after 1 minute. You can configure this with a `timeout` option, -which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/#fine-tuning-the-configuration) object: +which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object: ```python from gradientai import GradientAI From 1fa7ebb0080db9087b82d29e7197e44dfbb1ebed Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 19 Jun 2025 10:50:33 +0000 Subject: [PATCH 23/41] feat(api): update via SDK Studio --- .stats.yml | 4 +- api.md | 39 ++- src/gradientai/_client.py | 76 +++--- src/gradientai/resources/__init__.py | 26 +- .../resources/inference/__init__.py | 47 ++++ .../resources/{ => inference}/api_keys.py | 24 +- .../resources/inference/inference.py | 134 +++++++++++ src/gradientai/resources/inference/models.py | 226 ++++++++++++++++++ src/gradientai/types/__init__.py | 10 +- src/gradientai/types/inference/__init__.py | 15 ++ .../{ => inference}/api_key_create_params.py | 0 .../api_key_create_response.py | 2 +- .../api_key_delete_response.py | 2 +- .../{ => inference}/api_key_list_params.py | 0 .../{ => inference}/api_key_list_response.py | 6 +- .../{ => inference}/api_key_update_params.py | 2 +- .../api_key_update_regenerate_response.py | 2 +- .../api_key_update_response.py | 2 +- .../{ => 
inference}/api_model_api_key_info.py | 2 +- .../types/inference/model_list_response.py | 28 +++ .../inference/model_retrieve_response.py | 21 ++ src/gradientai/types/model.py | 48 ++++ src/gradientai/types/model_list_response.py | 44 +--- tests/api_resources/inference/__init__.py | 1 + .../{ => inference}/test_api_keys.py | 86 +++---- tests/api_resources/inference/test_models.py | 162 +++++++++++++ 26 files changed, 829 insertions(+), 180 deletions(-) create mode 100644 src/gradientai/resources/inference/__init__.py rename src/gradientai/resources/{ => inference}/api_keys.py (96%) create mode 100644 src/gradientai/resources/inference/inference.py create mode 100644 src/gradientai/resources/inference/models.py create mode 100644 src/gradientai/types/inference/__init__.py rename src/gradientai/types/{ => inference}/api_key_create_params.py (100%) rename src/gradientai/types/{ => inference}/api_key_create_response.py (90%) rename src/gradientai/types/{ => inference}/api_key_delete_response.py (90%) rename src/gradientai/types/{ => inference}/api_key_list_params.py (100%) rename src/gradientai/types/{ => inference}/api_key_list_response.py (77%) rename src/gradientai/types/{ => inference}/api_key_update_params.py (90%) rename src/gradientai/types/{ => inference}/api_key_update_regenerate_response.py (90%) rename src/gradientai/types/{ => inference}/api_key_update_response.py (90%) rename src/gradientai/types/{ => inference}/api_model_api_key_info.py (93%) create mode 100644 src/gradientai/types/inference/model_list_response.py create mode 100644 src/gradientai/types/inference/model_retrieve_response.py create mode 100644 src/gradientai/types/model.py create mode 100644 tests/api_resources/inference/__init__.py rename tests/api_resources/{ => inference}/test_api_keys.py (82%) create mode 100644 tests/api_resources/inference/test_models.py diff --git a/.stats.yml b/.stats.yml index 01cc76ec..3f5210da 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ 
-configured_endpoints: 56 +configured_endpoints: 58 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 6abb2ff94db8b1b61321606275ba3e80 +config_hash: 01ce4f461115cf14fd2b26a7d08a3a6a diff --git a/api.md b/api.md index bac163c8..3f713c24 100644 --- a/api.md +++ b/api.md @@ -250,12 +250,28 @@ Methods: - client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse - client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse -# APIKeys +# Chat + +## Completions Types: ```python -from gradientai.types import ( +from gradientai.types.chat import CompletionCreateResponse +``` + +Methods: + +- client.chat.completions.create(\*\*params) -> CompletionCreateResponse + +# Inference + +## APIKeys + +Types: + +```python +from gradientai.types.inference import ( APIModelAPIKeyInfo, APIKeyCreateResponse, APIKeyUpdateResponse, @@ -267,25 +283,24 @@ from gradientai.types import ( Methods: -- client.api_keys.create(\*\*params) -> APIKeyCreateResponse -- client.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse -- client.api_keys.list(\*\*params) -> APIKeyListResponse -- client.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse -- client.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse +- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse +- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse +- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse +- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse +- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse -# Chat - -## Completions +## Models Types: ```python -from 
gradientai.types.chat import CompletionCreateResponse +from gradientai.types.inference import ModelRetrieveResponse, ModelListResponse ``` Methods: -- client.chat.completions.create(\*\*params) -> CompletionCreateResponse +- client.inference.models.retrieve(model) -> ModelRetrieveResponse +- client.inference.models.list() -> ModelListResponse # Models diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index bec52a23..0a5eb9a1 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -31,13 +31,13 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, regions, api_keys, providers, indexing_jobs, knowledge_bases + from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource from .resources.regions import RegionsResource, AsyncRegionsResource - from .resources.api_keys import APIKeysResource, AsyncAPIKeysResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource + from .resources.inference.inference import InferenceResource, AsyncInferenceResource from .resources.providers.providers import ProvidersResource, AsyncProvidersResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource @@ -139,18 +139,18 @@ def knowledge_bases(self) -> KnowledgeBasesResource: return KnowledgeBasesResource(self) - @cached_property - def api_keys(self) -> APIKeysResource: - from .resources.api_keys import APIKeysResource - - return APIKeysResource(self) - @cached_property def chat(self) -> ChatResource: from .resources.chat import ChatResource return ChatResource(self) + @cached_property + def inference(self) -> InferenceResource: + from .resources.inference import InferenceResource + + return 
InferenceResource(self) + @cached_property def models(self) -> ModelsResource: from .resources.models import ModelsResource @@ -358,18 +358,18 @@ def knowledge_bases(self) -> AsyncKnowledgeBasesResource: return AsyncKnowledgeBasesResource(self) - @cached_property - def api_keys(self) -> AsyncAPIKeysResource: - from .resources.api_keys import AsyncAPIKeysResource - - return AsyncAPIKeysResource(self) - @cached_property def chat(self) -> AsyncChatResource: from .resources.chat import AsyncChatResource return AsyncChatResource(self) + @cached_property + def inference(self) -> AsyncInferenceResource: + from .resources.inference import AsyncInferenceResource + + return AsyncInferenceResource(self) + @cached_property def models(self) -> AsyncModelsResource: from .resources.models import AsyncModelsResource @@ -527,18 +527,18 @@ def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawRespon return KnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) - @cached_property - def api_keys(self) -> api_keys.APIKeysResourceWithRawResponse: - from .resources.api_keys import APIKeysResourceWithRawResponse - - return APIKeysResourceWithRawResponse(self._client.api_keys) - @cached_property def chat(self) -> chat.ChatResourceWithRawResponse: from .resources.chat import ChatResourceWithRawResponse return ChatResourceWithRawResponse(self._client.chat) + @cached_property + def inference(self) -> inference.InferenceResourceWithRawResponse: + from .resources.inference import InferenceResourceWithRawResponse + + return InferenceResourceWithRawResponse(self._client.inference) + @cached_property def models(self) -> models.ModelsResourceWithRawResponse: from .resources.models import ModelsResourceWithRawResponse @@ -582,18 +582,18 @@ def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawR return AsyncKnowledgeBasesResourceWithRawResponse(self._client.knowledge_bases) - @cached_property - def api_keys(self) -> 
api_keys.AsyncAPIKeysResourceWithRawResponse: - from .resources.api_keys import AsyncAPIKeysResourceWithRawResponse - - return AsyncAPIKeysResourceWithRawResponse(self._client.api_keys) - @cached_property def chat(self) -> chat.AsyncChatResourceWithRawResponse: from .resources.chat import AsyncChatResourceWithRawResponse return AsyncChatResourceWithRawResponse(self._client.chat) + @cached_property + def inference(self) -> inference.AsyncInferenceResourceWithRawResponse: + from .resources.inference import AsyncInferenceResourceWithRawResponse + + return AsyncInferenceResourceWithRawResponse(self._client.inference) + @cached_property def models(self) -> models.AsyncModelsResourceWithRawResponse: from .resources.models import AsyncModelsResourceWithRawResponse @@ -637,18 +637,18 @@ def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreaming return KnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) - @cached_property - def api_keys(self) -> api_keys.APIKeysResourceWithStreamingResponse: - from .resources.api_keys import APIKeysResourceWithStreamingResponse - - return APIKeysResourceWithStreamingResponse(self._client.api_keys) - @cached_property def chat(self) -> chat.ChatResourceWithStreamingResponse: from .resources.chat import ChatResourceWithStreamingResponse return ChatResourceWithStreamingResponse(self._client.chat) + @cached_property + def inference(self) -> inference.InferenceResourceWithStreamingResponse: + from .resources.inference import InferenceResourceWithStreamingResponse + + return InferenceResourceWithStreamingResponse(self._client.inference) + @cached_property def models(self) -> models.ModelsResourceWithStreamingResponse: from .resources.models import ModelsResourceWithStreamingResponse @@ -692,18 +692,18 @@ def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStre return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) - @cached_property - def 
api_keys(self) -> api_keys.AsyncAPIKeysResourceWithStreamingResponse: - from .resources.api_keys import AsyncAPIKeysResourceWithStreamingResponse - - return AsyncAPIKeysResourceWithStreamingResponse(self._client.api_keys) - @cached_property def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: from .resources.chat import AsyncChatResourceWithStreamingResponse return AsyncChatResourceWithStreamingResponse(self._client.chat) + @cached_property + def inference(self) -> inference.AsyncInferenceResourceWithStreamingResponse: + from .resources.inference import AsyncInferenceResourceWithStreamingResponse + + return AsyncInferenceResourceWithStreamingResponse(self._client.inference) + @cached_property def models(self) -> models.AsyncModelsResourceWithStreamingResponse: from .resources.models import AsyncModelsResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index de26662c..1763a13e 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -32,13 +32,13 @@ RegionsResourceWithStreamingResponse, AsyncRegionsResourceWithStreamingResponse, ) -from .api_keys import ( - APIKeysResource, - AsyncAPIKeysResource, - APIKeysResourceWithRawResponse, - AsyncAPIKeysResourceWithRawResponse, - APIKeysResourceWithStreamingResponse, - AsyncAPIKeysResourceWithStreamingResponse, +from .inference import ( + InferenceResource, + AsyncInferenceResource, + InferenceResourceWithRawResponse, + AsyncInferenceResourceWithRawResponse, + InferenceResourceWithStreamingResponse, + AsyncInferenceResourceWithStreamingResponse, ) from .providers import ( ProvidersResource, @@ -96,18 +96,18 @@ "AsyncKnowledgeBasesResourceWithRawResponse", "KnowledgeBasesResourceWithStreamingResponse", "AsyncKnowledgeBasesResourceWithStreamingResponse", - "APIKeysResource", - "AsyncAPIKeysResource", - "APIKeysResourceWithRawResponse", - "AsyncAPIKeysResourceWithRawResponse", - 
"APIKeysResourceWithStreamingResponse", - "AsyncAPIKeysResourceWithStreamingResponse", "ChatResource", "AsyncChatResource", "ChatResourceWithRawResponse", "AsyncChatResourceWithRawResponse", "ChatResourceWithStreamingResponse", "AsyncChatResourceWithStreamingResponse", + "InferenceResource", + "AsyncInferenceResource", + "InferenceResourceWithRawResponse", + "AsyncInferenceResourceWithRawResponse", + "InferenceResourceWithStreamingResponse", + "AsyncInferenceResourceWithStreamingResponse", "ModelsResource", "AsyncModelsResource", "ModelsResourceWithRawResponse", diff --git a/src/gradientai/resources/inference/__init__.py b/src/gradientai/resources/inference/__init__.py new file mode 100644 index 00000000..0e5631ce --- /dev/null +++ b/src/gradientai/resources/inference/__init__.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .models import ( + ModelsResource, + AsyncModelsResource, + ModelsResourceWithRawResponse, + AsyncModelsResourceWithRawResponse, + ModelsResourceWithStreamingResponse, + AsyncModelsResourceWithStreamingResponse, +) +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) +from .inference import ( + InferenceResource, + AsyncInferenceResource, + InferenceResourceWithRawResponse, + AsyncInferenceResourceWithRawResponse, + InferenceResourceWithStreamingResponse, + AsyncInferenceResourceWithStreamingResponse, +) + +__all__ = [ + "APIKeysResource", + "AsyncAPIKeysResource", + "APIKeysResourceWithRawResponse", + "AsyncAPIKeysResourceWithRawResponse", + "APIKeysResourceWithStreamingResponse", + "AsyncAPIKeysResourceWithStreamingResponse", + "ModelsResource", + "AsyncModelsResource", + "ModelsResourceWithRawResponse", + "AsyncModelsResourceWithRawResponse", + "ModelsResourceWithStreamingResponse", + 
"AsyncModelsResourceWithStreamingResponse", + "InferenceResource", + "AsyncInferenceResource", + "InferenceResourceWithRawResponse", + "AsyncInferenceResourceWithRawResponse", + "InferenceResourceWithStreamingResponse", + "AsyncInferenceResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/api_keys.py b/src/gradientai/resources/inference/api_keys.py similarity index 96% rename from src/gradientai/resources/api_keys.py rename to src/gradientai/resources/inference/api_keys.py index be1e346b..c00212f8 100644 --- a/src/gradientai/resources/api_keys.py +++ b/src/gradientai/resources/inference/api_keys.py @@ -4,23 +4,23 @@ import httpx -from ..types import api_key_list_params, api_key_create_params, api_key_update_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from .._base_client import make_request_options -from ..types.api_key_list_response import APIKeyListResponse -from ..types.api_key_create_response import APIKeyCreateResponse -from ..types.api_key_delete_response import APIKeyDeleteResponse -from ..types.api_key_update_response import APIKeyUpdateResponse -from ..types.api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse +from ..._base_client import make_request_options +from ...types.inference import api_key_list_params, api_key_create_params, api_key_update_params +from ...types.inference.api_key_list_response import APIKeyListResponse +from 
...types.inference.api_key_create_response import APIKeyCreateResponse +from ...types.inference.api_key_delete_response import APIKeyDeleteResponse +from ...types.inference.api_key_update_response import APIKeyUpdateResponse +from ...types.inference.api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse __all__ = ["APIKeysResource", "AsyncAPIKeysResource"] diff --git a/src/gradientai/resources/inference/inference.py b/src/gradientai/resources/inference/inference.py new file mode 100644 index 00000000..325353dc --- /dev/null +++ b/src/gradientai/resources/inference/inference.py @@ -0,0 +1,134 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .models import ( + ModelsResource, + AsyncModelsResource, + ModelsResourceWithRawResponse, + AsyncModelsResourceWithRawResponse, + ModelsResourceWithStreamingResponse, + AsyncModelsResourceWithStreamingResponse, +) +from .api_keys import ( + APIKeysResource, + AsyncAPIKeysResource, + APIKeysResourceWithRawResponse, + AsyncAPIKeysResourceWithRawResponse, + APIKeysResourceWithStreamingResponse, + AsyncAPIKeysResourceWithStreamingResponse, +) +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["InferenceResource", "AsyncInferenceResource"] + + +class InferenceResource(SyncAPIResource): + @cached_property + def api_keys(self) -> APIKeysResource: + return APIKeysResource(self._client) + + @cached_property + def models(self) -> ModelsResource: + return ModelsResource(self._client) + + @cached_property + def with_raw_response(self) -> InferenceResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + """ + return InferenceResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> InferenceResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + """ + return InferenceResourceWithStreamingResponse(self) + + +class AsyncInferenceResource(AsyncAPIResource): + @cached_property + def api_keys(self) -> AsyncAPIKeysResource: + return AsyncAPIKeysResource(self._client) + + @cached_property + def models(self) -> AsyncModelsResource: + return AsyncModelsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncInferenceResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + """ + return AsyncInferenceResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncInferenceResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + """ + return AsyncInferenceResourceWithStreamingResponse(self) + + +class InferenceResourceWithRawResponse: + def __init__(self, inference: InferenceResource) -> None: + self._inference = inference + + @cached_property + def api_keys(self) -> APIKeysResourceWithRawResponse: + return APIKeysResourceWithRawResponse(self._inference.api_keys) + + @cached_property + def models(self) -> ModelsResourceWithRawResponse: + return ModelsResourceWithRawResponse(self._inference.models) + + +class AsyncInferenceResourceWithRawResponse: + def __init__(self, inference: AsyncInferenceResource) -> None: + self._inference = inference + + @cached_property + def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: + return AsyncAPIKeysResourceWithRawResponse(self._inference.api_keys) + + @cached_property + def models(self) -> AsyncModelsResourceWithRawResponse: + return AsyncModelsResourceWithRawResponse(self._inference.models) + + +class InferenceResourceWithStreamingResponse: + def __init__(self, inference: InferenceResource) -> None: + self._inference = inference + + @cached_property + def api_keys(self) -> APIKeysResourceWithStreamingResponse: + return APIKeysResourceWithStreamingResponse(self._inference.api_keys) + + @cached_property + def models(self) -> ModelsResourceWithStreamingResponse: + return ModelsResourceWithStreamingResponse(self._inference.models) + + +class AsyncInferenceResourceWithStreamingResponse: + def __init__(self, inference: AsyncInferenceResource) -> None: + self._inference = inference + + @cached_property + def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: + return AsyncAPIKeysResourceWithStreamingResponse(self._inference.api_keys) + + @cached_property + def models(self) -> AsyncModelsResourceWithStreamingResponse: + return AsyncModelsResourceWithStreamingResponse(self._inference.models) diff --git 
a/src/gradientai/resources/inference/models.py b/src/gradientai/resources/inference/models.py new file mode 100644 index 00000000..c36f6cee --- /dev/null +++ b/src/gradientai/resources/inference/models.py @@ -0,0 +1,226 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.inference.model_list_response import ModelListResponse +from ...types.inference.model_retrieve_response import ModelRetrieveResponse + +__all__ = ["ModelsResource", "AsyncModelsResource"] + + +class ModelsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ModelsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + """ + return ModelsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + """ + return ModelsResourceWithStreamingResponse(self) + + def retrieve( + self, + model: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelRetrieveResponse: + """ + Retrieves a model instance, providing basic information about the model such as + the owner and permissioning. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not model: + raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") + return self._get( + f"/models/{model}" + if self._client._base_url_overridden + else f"https://inference.do-ai.run/v1/models/{model}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModelRetrieveResponse, + ) + + def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelListResponse: + """ + Lists the currently available models, and provides basic information about each + one such as the owner and availability. 
+ """ + return self._get( + "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModelListResponse, + ) + + +class AsyncModelsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + """ + return AsyncModelsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + """ + return AsyncModelsResourceWithStreamingResponse(self) + + async def retrieve( + self, + model: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelRetrieveResponse: + """ + Retrieves a model instance, providing basic information about the model such as + the owner and permissioning. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not model: + raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") + return await self._get( + f"/models/{model}" + if self._client._base_url_overridden + else f"https://inference.do-ai.run/v1/models/{model}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModelRetrieveResponse, + ) + + async def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelListResponse: + """ + Lists the currently available models, and provides basic information about each + one such as the owner and availability. 
+ """ + return await self._get( + "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModelListResponse, + ) + + +class ModelsResourceWithRawResponse: + def __init__(self, models: ModelsResource) -> None: + self._models = models + + self.retrieve = to_raw_response_wrapper( + models.retrieve, + ) + self.list = to_raw_response_wrapper( + models.list, + ) + + +class AsyncModelsResourceWithRawResponse: + def __init__(self, models: AsyncModelsResource) -> None: + self._models = models + + self.retrieve = async_to_raw_response_wrapper( + models.retrieve, + ) + self.list = async_to_raw_response_wrapper( + models.list, + ) + + +class ModelsResourceWithStreamingResponse: + def __init__(self, models: ModelsResource) -> None: + self._models = models + + self.retrieve = to_streamed_response_wrapper( + models.retrieve, + ) + self.list = to_streamed_response_wrapper( + models.list, + ) + + +class AsyncModelsResourceWithStreamingResponse: + def __init__(self, models: AsyncModelsResource) -> None: + self._models = models + + self.retrieve = async_to_streamed_response_wrapper( + models.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + models.list, + ) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index ddbbe52d..091fe110 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -2,6 +2,7 @@ from __future__ import annotations +from .model import Model as Model from .api_agent import APIAgent as APIAgent from .api_model import APIModel as APIModel from .api_indexing_job import APIIndexingJob as APIIndexingJob @@ -12,22 +13,14 @@ from .agent_create_params import AgentCreateParams as AgentCreateParams from .agent_list_response import AgentListResponse as AgentListResponse from .agent_update_params import AgentUpdateParams as 
AgentUpdateParams -from .api_key_list_params import APIKeyListParams as APIKeyListParams from .model_list_response import ModelListResponse as ModelListResponse from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod from .region_list_response import RegionListResponse as RegionListResponse from .agent_create_response import AgentCreateResponse as AgentCreateResponse from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse -from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams -from .api_key_list_response import APIKeyListResponse as APIKeyListResponse -from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo -from .api_model_api_key_info import APIModelAPIKeyInfo as APIModelAPIKeyInfo from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse -from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse -from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse -from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility @@ -47,7 +40,6 @@ from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams -from .api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse as APIKeyUpdateRegenerateResponse from 
.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse from .indexing_job_retrieve_data_sources_response import ( IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, diff --git a/src/gradientai/types/inference/__init__.py b/src/gradientai/types/inference/__init__.py new file mode 100644 index 00000000..d1ccb71b --- /dev/null +++ b/src/gradientai/types/inference/__init__.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .api_key_list_params import APIKeyListParams as APIKeyListParams +from .model_list_response import ModelListResponse as ModelListResponse +from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams +from .api_key_list_response import APIKeyListResponse as APIKeyListResponse +from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams +from .api_model_api_key_info import APIModelAPIKeyInfo as APIModelAPIKeyInfo +from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse +from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse +from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse +from .model_retrieve_response import ModelRetrieveResponse as ModelRetrieveResponse +from .api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse as APIKeyUpdateRegenerateResponse diff --git a/src/gradientai/types/api_key_create_params.py b/src/gradientai/types/inference/api_key_create_params.py similarity index 100% rename from src/gradientai/types/api_key_create_params.py rename to src/gradientai/types/inference/api_key_create_params.py diff --git a/src/gradientai/types/api_key_create_response.py b/src/gradientai/types/inference/api_key_create_response.py similarity index 90% rename from src/gradientai/types/api_key_create_response.py rename to 
src/gradientai/types/inference/api_key_create_response.py index 2d6024cf..654e9f1e 100644 --- a/src/gradientai/types/api_key_create_response.py +++ b/src/gradientai/types/inference/api_key_create_response.py @@ -2,7 +2,7 @@ from typing import Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyCreateResponse"] diff --git a/src/gradientai/types/api_key_delete_response.py b/src/gradientai/types/inference/api_key_delete_response.py similarity index 90% rename from src/gradientai/types/api_key_delete_response.py rename to src/gradientai/types/inference/api_key_delete_response.py index d65286c8..4d81d047 100644 --- a/src/gradientai/types/api_key_delete_response.py +++ b/src/gradientai/types/inference/api_key_delete_response.py @@ -2,7 +2,7 @@ from typing import Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyDeleteResponse"] diff --git a/src/gradientai/types/api_key_list_params.py b/src/gradientai/types/inference/api_key_list_params.py similarity index 100% rename from src/gradientai/types/api_key_list_params.py rename to src/gradientai/types/inference/api_key_list_params.py diff --git a/src/gradientai/types/api_key_list_response.py b/src/gradientai/types/inference/api_key_list_response.py similarity index 77% rename from src/gradientai/types/api_key_list_response.py rename to src/gradientai/types/inference/api_key_list_response.py index db45102b..535e2f96 100644 --- a/src/gradientai/types/api_key_list_response.py +++ b/src/gradientai/types/inference/api_key_list_response.py @@ -2,9 +2,9 @@ from typing import List, Optional -from .._models import BaseModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks +from ..._models import BaseModel +from ..agents.api_meta import APIMeta +from ..agents.api_links import APILinks from .api_model_api_key_info 
import APIModelAPIKeyInfo __all__ = ["APIKeyListResponse"] diff --git a/src/gradientai/types/api_key_update_params.py b/src/gradientai/types/inference/api_key_update_params.py similarity index 90% rename from src/gradientai/types/api_key_update_params.py rename to src/gradientai/types/inference/api_key_update_params.py index 1678304f..23c1c0b9 100644 --- a/src/gradientai/types/api_key_update_params.py +++ b/src/gradientai/types/inference/api_key_update_params.py @@ -4,7 +4,7 @@ from typing_extensions import Annotated, TypedDict -from .._utils import PropertyInfo +from ..._utils import PropertyInfo __all__ = ["APIKeyUpdateParams"] diff --git a/src/gradientai/types/api_key_update_regenerate_response.py b/src/gradientai/types/inference/api_key_update_regenerate_response.py similarity index 90% rename from src/gradientai/types/api_key_update_regenerate_response.py rename to src/gradientai/types/inference/api_key_update_regenerate_response.py index eaf19b6e..44a316dc 100644 --- a/src/gradientai/types/api_key_update_regenerate_response.py +++ b/src/gradientai/types/inference/api_key_update_regenerate_response.py @@ -2,7 +2,7 @@ from typing import Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyUpdateRegenerateResponse"] diff --git a/src/gradientai/types/api_key_update_response.py b/src/gradientai/types/inference/api_key_update_response.py similarity index 90% rename from src/gradientai/types/api_key_update_response.py rename to src/gradientai/types/inference/api_key_update_response.py index a8d79898..3671addf 100644 --- a/src/gradientai/types/api_key_update_response.py +++ b/src/gradientai/types/inference/api_key_update_response.py @@ -2,7 +2,7 @@ from typing import Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyUpdateResponse"] diff --git 
a/src/gradientai/types/api_model_api_key_info.py b/src/gradientai/types/inference/api_model_api_key_info.py similarity index 93% rename from src/gradientai/types/api_model_api_key_info.py rename to src/gradientai/types/inference/api_model_api_key_info.py index c05c9cef..bf354a47 100644 --- a/src/gradientai/types/api_model_api_key_info.py +++ b/src/gradientai/types/inference/api_model_api_key_info.py @@ -3,7 +3,7 @@ from typing import Optional from datetime import datetime -from .._models import BaseModel +from ..._models import BaseModel __all__ = ["APIModelAPIKeyInfo"] diff --git a/src/gradientai/types/inference/model_list_response.py b/src/gradientai/types/inference/model_list_response.py new file mode 100644 index 00000000..64f1e5b4 --- /dev/null +++ b/src/gradientai/types/inference/model_list_response.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ModelListResponse", "Data"] + + +class Data(BaseModel): + id: str + """The model identifier, which can be referenced in the API endpoints.""" + + created: int + """The Unix timestamp (in seconds) when the model was created.""" + + object: Literal["model"] + """The object type, which is always "model".""" + + owned_by: str + """The organization that owns the model.""" + + +class ModelListResponse(BaseModel): + data: List[Data] + + object: Literal["list"] diff --git a/src/gradientai/types/inference/model_retrieve_response.py b/src/gradientai/types/inference/model_retrieve_response.py new file mode 100644 index 00000000..1b8fca25 --- /dev/null +++ b/src/gradientai/types/inference/model_retrieve_response.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ModelRetrieveResponse"] + + +class ModelRetrieveResponse(BaseModel): + id: str + """The model identifier, which can be referenced in the API endpoints.""" + + created: int + """The Unix timestamp (in seconds) when the model was created.""" + + object: Literal["model"] + """The object type, which is always "model".""" + + owned_by: str + """The organization that owns the model.""" diff --git a/src/gradientai/types/model.py b/src/gradientai/types/model.py new file mode 100644 index 00000000..cba51b07 --- /dev/null +++ b/src/gradientai/types/model.py @@ -0,0 +1,48 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime + +from .._models import BaseModel + +__all__ = ["Model", "Agreement", "Version"] + + +class Agreement(BaseModel): + description: Optional[str] = None + + name: Optional[str] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + +class Version(BaseModel): + major: Optional[int] = None + + minor: Optional[int] = None + + patch: Optional[int] = None + + +class Model(BaseModel): + agreement: Optional[Agreement] = None + + created_at: Optional[datetime] = None + + is_foundational: Optional[bool] = None + + name: Optional[str] = None + + parent_uuid: Optional[str] = None + + updated_at: Optional[datetime] = None + + upload_complete: Optional[bool] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + version: Optional[Version] = None diff --git a/src/gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py index 29d6a34e..93d9ae04 100644 --- a/src/gradientai/types/model_list_response.py +++ b/src/gradientai/types/model_list_response.py @@ -1,53 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from typing import List, Optional -from datetime import datetime +from .model import Model from .._models import BaseModel from .agents.api_meta import APIMeta from .agents.api_links import APILinks -__all__ = ["ModelListResponse", "Model", "ModelAgreement", "ModelVersion"] - - -class ModelAgreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - -class ModelVersion(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None - - -class Model(BaseModel): - agreement: Optional[ModelAgreement] = None - - created_at: Optional[datetime] = None - - is_foundational: Optional[bool] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - version: Optional[ModelVersion] = None +__all__ = ["ModelListResponse"] class ModelListResponse(BaseModel): diff --git a/tests/api_resources/inference/__init__.py b/tests/api_resources/inference/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/inference/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
diff --git a/tests/api_resources/test_api_keys.py b/tests/api_resources/inference/test_api_keys.py similarity index 82% rename from tests/api_resources/test_api_keys.py rename to tests/api_resources/inference/test_api_keys.py index a06e0b3a..d84572c7 100644 --- a/tests/api_resources/test_api_keys.py +++ b/tests/api_resources/inference/test_api_keys.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ( +from gradientai.types.inference import ( APIKeyListResponse, APIKeyCreateResponse, APIKeyDeleteResponse, @@ -26,13 +26,13 @@ class TestAPIKeys: @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - api_key = client.api_keys.create() + api_key = client.inference.api_keys.create() assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - api_key = client.api_keys.create( + api_key = client.inference.api_keys.create( name="name", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -40,7 +40,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.api_keys.with_raw_response.create() + response = client.inference.api_keys.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -50,7 +50,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.api_keys.with_streaming_response.create() as response: + with client.inference.api_keys.with_streaming_response.create() as response: assert not response.is_closed assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -62,7 +62,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - api_key = client.api_keys.update( + api_key = client.inference.api_keys.update( path_api_key_uuid="api_key_uuid", ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -70,7 +70,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - api_key = client.api_keys.update( + api_key = client.inference.api_keys.update( path_api_key_uuid="api_key_uuid", body_api_key_uuid="api_key_uuid", name="name", @@ -80,7 +80,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.api_keys.with_raw_response.update( + response = client.inference.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", ) @@ -92,7 +92,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.api_keys.with_streaming_response.update( + with client.inference.api_keys.with_streaming_response.update( path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed @@ -107,20 +107,20 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.api_keys.with_raw_response.update( + client.inference.api_keys.with_raw_response.update( path_api_key_uuid="", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - 
api_key = client.api_keys.list() + api_key = client.inference.api_keys.list() assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - api_key = client.api_keys.list( + api_key = client.inference.api_keys.list( page=0, per_page=0, ) @@ -129,7 +129,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.api_keys.with_raw_response.list() + response = client.inference.api_keys.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -139,7 +139,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.api_keys.with_streaming_response.list() as response: + with client.inference.api_keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -151,7 +151,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - api_key = client.api_keys.delete( + api_key = client.inference.api_keys.delete( "api_key_uuid", ) assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) @@ -159,7 +159,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.api_keys.with_raw_response.delete( + response = client.inference.api_keys.with_raw_response.delete( "api_key_uuid", ) @@ -171,7 +171,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def 
test_streaming_response_delete(self, client: GradientAI) -> None: - with client.api_keys.with_streaming_response.delete( + with client.inference.api_keys.with_streaming_response.delete( "api_key_uuid", ) as response: assert not response.is_closed @@ -186,14 +186,14 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.api_keys.with_raw_response.delete( + client.inference.api_keys.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize def test_method_update_regenerate(self, client: GradientAI) -> None: - api_key = client.api_keys.update_regenerate( + api_key = client.inference.api_keys.update_regenerate( "api_key_uuid", ) assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) @@ -201,7 +201,7 @@ def test_method_update_regenerate(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update_regenerate(self, client: GradientAI) -> None: - response = client.api_keys.with_raw_response.update_regenerate( + response = client.inference.api_keys.with_raw_response.update_regenerate( "api_key_uuid", ) @@ -213,7 +213,7 @@ def test_raw_response_update_regenerate(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update_regenerate(self, client: GradientAI) -> None: - with client.api_keys.with_streaming_response.update_regenerate( + with client.inference.api_keys.with_streaming_response.update_regenerate( "api_key_uuid", ) as response: assert not response.is_closed @@ -228,7 +228,7 @@ def test_streaming_response_update_regenerate(self, client: GradientAI) -> None: @parametrize def test_path_params_update_regenerate(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - 
client.api_keys.with_raw_response.update_regenerate( + client.inference.api_keys.with_raw_response.update_regenerate( "", ) @@ -239,13 +239,13 @@ class TestAsyncAPIKeys: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.create() + api_key = await async_client.inference.api_keys.create() assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.create( + api_key = await async_client.inference.api_keys.create( name="name", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -253,7 +253,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.with_raw_response.create() + response = await async_client.inference.api_keys.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -263,7 +263,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.with_streaming_response.create() as response: + async with async_client.inference.api_keys.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -275,7 +275,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.update( + 
api_key = await async_client.inference.api_keys.update( path_api_key_uuid="api_key_uuid", ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -283,7 +283,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.update( + api_key = await async_client.inference.api_keys.update( path_api_key_uuid="api_key_uuid", body_api_key_uuid="api_key_uuid", name="name", @@ -293,7 +293,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.with_raw_response.update( + response = await async_client.inference.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", ) @@ -305,7 +305,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.with_streaming_response.update( + async with async_client.inference.api_keys.with_streaming_response.update( path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed @@ -320,20 +320,20 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.api_keys.with_raw_response.update( + await async_client.inference.api_keys.with_raw_response.update( path_api_key_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - api_key = await 
async_client.api_keys.list() + api_key = await async_client.inference.api_keys.list() assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.list( + api_key = await async_client.inference.api_keys.list( page=0, per_page=0, ) @@ -342,7 +342,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.with_raw_response.list() + response = await async_client.inference.api_keys.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -352,7 +352,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.with_streaming_response.list() as response: + async with async_client.inference.api_keys.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -364,7 +364,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.delete( + api_key = await async_client.inference.api_keys.delete( "api_key_uuid", ) assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) @@ -372,7 +372,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await 
async_client.api_keys.with_raw_response.delete( + response = await async_client.inference.api_keys.with_raw_response.delete( "api_key_uuid", ) @@ -384,7 +384,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.api_keys.with_streaming_response.delete( + async with async_client.inference.api_keys.with_streaming_response.delete( "api_key_uuid", ) as response: assert not response.is_closed @@ -399,14 +399,14 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.api_keys.with_raw_response.delete( + await async_client.inference.api_keys.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize async def test_method_update_regenerate(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.api_keys.update_regenerate( + api_key = await async_client.inference.api_keys.update_regenerate( "api_key_uuid", ) assert_matches_type(APIKeyUpdateRegenerateResponse, api_key, path=["response"]) @@ -414,7 +414,7 @@ async def test_method_update_regenerate(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_raw_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: - response = await async_client.api_keys.with_raw_response.update_regenerate( + response = await async_client.inference.api_keys.with_raw_response.update_regenerate( "api_key_uuid", ) @@ -426,7 +426,7 @@ async def test_raw_response_update_regenerate(self, async_client: AsyncGradientA @pytest.mark.skip() @parametrize async def test_streaming_response_update_regenerate(self, async_client: AsyncGradientAI) -> None: - async with 
async_client.api_keys.with_streaming_response.update_regenerate( + async with async_client.inference.api_keys.with_streaming_response.update_regenerate( "api_key_uuid", ) as response: assert not response.is_closed @@ -441,6 +441,6 @@ async def test_streaming_response_update_regenerate(self, async_client: AsyncGra @parametrize async def test_path_params_update_regenerate(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.api_keys.with_raw_response.update_regenerate( + await async_client.inference.api_keys.with_raw_response.update_regenerate( "", ) diff --git a/tests/api_resources/inference/test_models.py b/tests/api_resources/inference/test_models.py new file mode 100644 index 00000000..7fb735fb --- /dev/null +++ b/tests/api_resources/inference/test_models.py @@ -0,0 +1,162 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.inference import ModelListResponse, ModelRetrieveResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestModels: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + model = client.inference.models.retrieve( + "llama3-8b-instruct", + ) + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.inference.models.with_raw_response.retrieve( + "llama3-8b-instruct", + ) + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + model = response.parse() + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.inference.models.with_streaming_response.retrieve( + "llama3-8b-instruct", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = response.parse() + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): + client.inference.models.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + model = client.inference.models.list() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.inference.models.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.inference.models.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncModels: + parametrize = 
pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + model = await async_client.inference.models.retrieve( + "llama3-8b-instruct", + ) + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.inference.models.with_raw_response.retrieve( + "llama3-8b-instruct", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = await response.parse() + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.inference.models.with_streaming_response.retrieve( + "llama3-8b-instruct", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = await response.parse() + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): + await async_client.inference.models.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + model = await async_client.inference.models.list() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await 
async_client.inference.models.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = await response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.inference.models.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = await response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True From eac41f12912b8d32ffa23d225f4ca56fa5c72505 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 19 Jun 2025 10:51:29 +0000 Subject: [PATCH 24/41] feat(api): update via SDK Studio --- .stats.yml | 2 +- api.md | 4 ++-- src/gradientai/resources/inference/models.py | 10 +++++----- src/gradientai/types/inference/__init__.py | 2 +- .../{model_retrieve_response.py => model.py} | 4 ++-- .../types/inference/model_list_response.py | 19 +++---------------- tests/api_resources/inference/test_models.py | 14 +++++++------- 7 files changed, 21 insertions(+), 34 deletions(-) rename src/gradientai/types/inference/{model_retrieve_response.py => model.py} (86%) diff --git a/.stats.yml b/.stats.yml index 3f5210da..b82a0dc7 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 01ce4f461115cf14fd2b26a7d08a3a6a +config_hash: f8b4e76be8bb430b0a00d2fd04d71615 diff --git a/api.md b/api.md index 3f713c24..adc47296 100644 --- 
a/api.md +++ b/api.md @@ -294,12 +294,12 @@ Methods: Types: ```python -from gradientai.types.inference import ModelRetrieveResponse, ModelListResponse +from gradientai.types.inference import Model, ModelListResponse ``` Methods: -- client.inference.models.retrieve(model) -> ModelRetrieveResponse +- client.inference.models.retrieve(model) -> Model - client.inference.models.list() -> ModelListResponse # Models diff --git a/src/gradientai/resources/inference/models.py b/src/gradientai/resources/inference/models.py index c36f6cee..da327695 100644 --- a/src/gradientai/resources/inference/models.py +++ b/src/gradientai/resources/inference/models.py @@ -14,8 +14,8 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options +from ...types.inference.model import Model from ...types.inference.model_list_response import ModelListResponse -from ...types.inference.model_retrieve_response import ModelRetrieveResponse __all__ = ["ModelsResource", "AsyncModelsResource"] @@ -50,7 +50,7 @@ def retrieve( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelRetrieveResponse: + ) -> Model: """ Retrieves a model instance, providing basic information about the model such as the owner and permissioning. @@ -73,7 +73,7 @@ def retrieve( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=ModelRetrieveResponse, + cast_to=Model, ) def list( @@ -129,7 +129,7 @@ async def retrieve( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelRetrieveResponse: + ) -> Model: """ Retrieves a model instance, providing basic information about the model such as the owner and permissioning. 
@@ -152,7 +152,7 @@ async def retrieve( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=ModelRetrieveResponse, + cast_to=Model, ) async def list( diff --git a/src/gradientai/types/inference/__init__.py b/src/gradientai/types/inference/__init__.py index d1ccb71b..829340d7 100644 --- a/src/gradientai/types/inference/__init__.py +++ b/src/gradientai/types/inference/__init__.py @@ -2,6 +2,7 @@ from __future__ import annotations +from .model import Model as Model from .api_key_list_params import APIKeyListParams as APIKeyListParams from .model_list_response import ModelListResponse as ModelListResponse from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams @@ -11,5 +12,4 @@ from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse -from .model_retrieve_response import ModelRetrieveResponse as ModelRetrieveResponse from .api_key_update_regenerate_response import APIKeyUpdateRegenerateResponse as APIKeyUpdateRegenerateResponse diff --git a/src/gradientai/types/inference/model_retrieve_response.py b/src/gradientai/types/inference/model.py similarity index 86% rename from src/gradientai/types/inference/model_retrieve_response.py rename to src/gradientai/types/inference/model.py index 1b8fca25..ed8843e8 100644 --- a/src/gradientai/types/inference/model_retrieve_response.py +++ b/src/gradientai/types/inference/model.py @@ -4,10 +4,10 @@ from ..._models import BaseModel -__all__ = ["ModelRetrieveResponse"] +__all__ = ["Model"] -class ModelRetrieveResponse(BaseModel): +class Model(BaseModel): id: str """The model identifier, which can be referenced in the API endpoints.""" diff --git a/src/gradientai/types/inference/model_list_response.py 
b/src/gradientai/types/inference/model_list_response.py index 64f1e5b4..01bf3b62 100644 --- a/src/gradientai/types/inference/model_list_response.py +++ b/src/gradientai/types/inference/model_list_response.py @@ -3,26 +3,13 @@ from typing import List from typing_extensions import Literal +from .model import Model from ..._models import BaseModel -__all__ = ["ModelListResponse", "Data"] - - -class Data(BaseModel): - id: str - """The model identifier, which can be referenced in the API endpoints.""" - - created: int - """The Unix timestamp (in seconds) when the model was created.""" - - object: Literal["model"] - """The object type, which is always "model".""" - - owned_by: str - """The organization that owns the model.""" +__all__ = ["ModelListResponse"] class ModelListResponse(BaseModel): - data: List[Data] + data: List[Model] object: Literal["list"] diff --git a/tests/api_resources/inference/test_models.py b/tests/api_resources/inference/test_models.py index 7fb735fb..936801cb 100644 --- a/tests/api_resources/inference/test_models.py +++ b/tests/api_resources/inference/test_models.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.inference import ModelListResponse, ModelRetrieveResponse +from gradientai.types.inference import Model, ModelListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -23,7 +23,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: model = client.inference.models.retrieve( "llama3-8b-instruct", ) - assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + assert_matches_type(Model, model, path=["response"]) @pytest.mark.skip() @parametrize @@ -35,7 +35,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" model = response.parse() - assert_matches_type(ModelRetrieveResponse, 
model, path=["response"]) + assert_matches_type(Model, model, path=["response"]) @pytest.mark.skip() @parametrize @@ -47,7 +47,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: assert response.http_request.headers.get("X-Stainless-Lang") == "python" model = response.parse() - assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + assert_matches_type(Model, model, path=["response"]) assert cast(Any, response.is_closed) is True @@ -97,7 +97,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: model = await async_client.inference.models.retrieve( "llama3-8b-instruct", ) - assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + assert_matches_type(Model, model, path=["response"]) @pytest.mark.skip() @parametrize @@ -109,7 +109,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" model = await response.parse() - assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + assert_matches_type(Model, model, path=["response"]) @pytest.mark.skip() @parametrize @@ -121,7 +121,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) assert response.http_request.headers.get("X-Stainless-Lang") == "python" model = await response.parse() - assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + assert_matches_type(Model, model, path=["response"]) assert cast(Any, response.is_closed) is True From 97e17687a348b8ef218c23a06729b6edb1ac5ea9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 19 Jun 2025 10:52:47 +0000 Subject: [PATCH 25/41] feat(api): update via SDK Studio --- .stats.yml | 2 +- api.md | 3 +- src/gradientai/types/__init__.py | 1 - src/gradientai/types/agent_list_response.py | 139 +++++++++++++++++- 
src/gradientai/types/api_agent.py | 139 +++++++++++++++++- src/gradientai/types/api_model.py | 27 +--- .../types/api_openai_api_key_info.py | 70 ++++++++- src/gradientai/types/model.py | 48 ------ src/gradientai/types/model_list_response.py | 4 +- 9 files changed, 344 insertions(+), 89 deletions(-) delete mode 100644 src/gradientai/types/model.py diff --git a/.stats.yml b/.stats.yml index b82a0dc7..8f85d58c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: f8b4e76be8bb430b0a00d2fd04d71615 +config_hash: a2c4c23eb1e8655fbfb2b6930ce0fd46 diff --git a/api.md b/api.md index adc47296..2376a11f 100644 --- a/api.md +++ b/api.md @@ -8,7 +8,6 @@ from gradientai.types import ( APIAgentAPIKeyInfo, APIAnthropicAPIKeyInfo, APIDeploymentVisibility, - APIModel, APIOpenAIAPIKeyInfo, APIRetrievalMethod, AgentCreateResponse, @@ -307,7 +306,7 @@ Methods: Types: ```python -from gradientai.types import Model, ModelListResponse +from gradientai.types import APIModel, ModelListResponse ``` Methods: diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index 091fe110..5ee961c6 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -2,7 +2,6 @@ from __future__ import annotations -from .model import Model as Model from .api_agent import APIAgent as APIAgent from .api_model import APIModel as APIModel from .api_indexing_job import APIIndexingJob as APIIndexingJob diff --git a/src/gradientai/types/agent_list_response.py b/src/gradientai/types/agent_list_response.py index 4cedbb39..6af9cd51 100644 --- a/src/gradientai/types/agent_list_response.py +++ b/src/gradientai/types/agent_list_response.py @@ -5,7 +5,6 @@ from typing_extensions import Literal from 
.._models import BaseModel -from .api_model import APIModel from .agents.api_meta import APIMeta from .agents.api_links import APILinks from .api_knowledge_base import APIKnowledgeBase @@ -18,8 +17,14 @@ "AgentChatbot", "AgentChatbotIdentifier", "AgentDeployment", + "AgentModel", + "AgentModelAgreement", + "AgentModelVersion", "AgentTemplate", "AgentTemplateGuardrail", + "AgentTemplateModel", + "AgentTemplateModelAgreement", + "AgentTemplateModelVersion", ] @@ -69,12 +74,140 @@ class AgentDeployment(BaseModel): visibility: Optional[APIDeploymentVisibility] = None +class AgentModelAgreement(BaseModel): + description: Optional[str] = None + + name: Optional[str] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + +class AgentModelVersion(BaseModel): + major: Optional[int] = None + + minor: Optional[int] = None + + patch: Optional[int] = None + + +class AgentModel(BaseModel): + agreement: Optional[AgentModelAgreement] = None + + created_at: Optional[datetime] = None + + inference_name: Optional[str] = None + + inference_version: Optional[str] = None + + is_foundational: Optional[bool] = None + + metadata: Optional[object] = None + + name: Optional[str] = None + + parent_uuid: Optional[str] = None + + provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( + None + ) + + updated_at: Optional[datetime] = None + + upload_complete: Optional[bool] = None + + url: Optional[str] = None + + usecases: Optional[ + List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + ] = None + + uuid: Optional[str] = None + + version: Optional[AgentModelVersion] = None + + class AgentTemplateGuardrail(BaseModel): priority: Optional[int] = None uuid: Optional[str] = None +class AgentTemplateModelAgreement(BaseModel): + description: 
Optional[str] = None + + name: Optional[str] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + +class AgentTemplateModelVersion(BaseModel): + major: Optional[int] = None + + minor: Optional[int] = None + + patch: Optional[int] = None + + +class AgentTemplateModel(BaseModel): + agreement: Optional[AgentTemplateModelAgreement] = None + + created_at: Optional[datetime] = None + + inference_name: Optional[str] = None + + inference_version: Optional[str] = None + + is_foundational: Optional[bool] = None + + metadata: Optional[object] = None + + name: Optional[str] = None + + parent_uuid: Optional[str] = None + + provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( + None + ) + + updated_at: Optional[datetime] = None + + upload_complete: Optional[bool] = None + + url: Optional[str] = None + + usecases: Optional[ + List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + ] = None + + uuid: Optional[str] = None + + version: Optional[AgentTemplateModelVersion] = None + + class AgentTemplate(BaseModel): created_at: Optional[datetime] = None @@ -92,7 +225,7 @@ class AgentTemplate(BaseModel): max_tokens: Optional[int] = None - model: Optional[APIModel] = None + model: Optional[AgentTemplateModel] = None name: Optional[str] = None @@ -143,7 +276,7 @@ class Agent(BaseModel): response. 
""" - model: Optional[APIModel] = None + model: Optional[AgentModel] = None name: Optional[str] = None diff --git a/src/gradientai/types/api_agent.py b/src/gradientai/types/api_agent.py index 0a8df679..3eb01fc7 100644 --- a/src/gradientai/types/api_agent.py +++ b/src/gradientai/types/api_agent.py @@ -7,7 +7,6 @@ from typing_extensions import Literal from .._models import BaseModel -from .api_model import APIModel from .api_knowledge_base import APIKnowledgeBase from .api_retrieval_method import APIRetrievalMethod from .api_agent_api_key_info import APIAgentAPIKeyInfo @@ -23,8 +22,14 @@ "Deployment", "Function", "Guardrail", + "Model", + "ModelAgreement", + "ModelVersion", "Template", "TemplateGuardrail", + "TemplateModel", + "TemplateModelAgreement", + "TemplateModelVersion", ] @@ -139,12 +144,140 @@ class Guardrail(BaseModel): uuid: Optional[str] = None +class ModelAgreement(BaseModel): + description: Optional[str] = None + + name: Optional[str] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + +class ModelVersion(BaseModel): + major: Optional[int] = None + + minor: Optional[int] = None + + patch: Optional[int] = None + + +class Model(BaseModel): + agreement: Optional[ModelAgreement] = None + + created_at: Optional[datetime] = None + + inference_name: Optional[str] = None + + inference_version: Optional[str] = None + + is_foundational: Optional[bool] = None + + metadata: Optional[object] = None + + name: Optional[str] = None + + parent_uuid: Optional[str] = None + + provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( + None + ) + + updated_at: Optional[datetime] = None + + upload_complete: Optional[bool] = None + + url: Optional[str] = None + + usecases: Optional[ + List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + 
"MODEL_USECASE_SERVERLESS", + ] + ] + ] = None + + uuid: Optional[str] = None + + version: Optional[ModelVersion] = None + + class TemplateGuardrail(BaseModel): priority: Optional[int] = None uuid: Optional[str] = None +class TemplateModelAgreement(BaseModel): + description: Optional[str] = None + + name: Optional[str] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + +class TemplateModelVersion(BaseModel): + major: Optional[int] = None + + minor: Optional[int] = None + + patch: Optional[int] = None + + +class TemplateModel(BaseModel): + agreement: Optional[TemplateModelAgreement] = None + + created_at: Optional[datetime] = None + + inference_name: Optional[str] = None + + inference_version: Optional[str] = None + + is_foundational: Optional[bool] = None + + metadata: Optional[object] = None + + name: Optional[str] = None + + parent_uuid: Optional[str] = None + + provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( + None + ) + + updated_at: Optional[datetime] = None + + upload_complete: Optional[bool] = None + + url: Optional[str] = None + + usecases: Optional[ + List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + ] = None + + uuid: Optional[str] = None + + version: Optional[TemplateModelVersion] = None + + class Template(BaseModel): created_at: Optional[datetime] = None @@ -162,7 +295,7 @@ class Template(BaseModel): max_tokens: Optional[int] = None - model: Optional[APIModel] = None + model: Optional[TemplateModel] = None name: Optional[str] = None @@ -222,7 +355,7 @@ class APIAgent(BaseModel): max_tokens: Optional[int] = None - model: Optional[APIModel] = None + model: Optional[Model] = None name: Optional[str] = None diff --git a/src/gradientai/types/api_model.py b/src/gradientai/types/api_model.py index 
82120454..ac6f9c55 100644 --- a/src/gradientai/types/api_model.py +++ b/src/gradientai/types/api_model.py @@ -1,8 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Optional +from typing import Optional from datetime import datetime -from typing_extensions import Literal from .._models import BaseModel @@ -32,42 +31,18 @@ class APIModel(BaseModel): created_at: Optional[datetime] = None - inference_name: Optional[str] = None - - inference_version: Optional[str] = None - is_foundational: Optional[bool] = None - metadata: Optional[object] = None - name: Optional[str] = None parent_uuid: Optional[str] = None - provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( - None - ) - updated_at: Optional[datetime] = None upload_complete: Optional[bool] = None url: Optional[str] = None - usecases: Optional[ - List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - ] = None - uuid: Optional[str] = None version: Optional[Version] = None diff --git a/src/gradientai/types/api_openai_api_key_info.py b/src/gradientai/types/api_openai_api_key_info.py index 39328f80..0f57136d 100644 --- a/src/gradientai/types/api_openai_api_key_info.py +++ b/src/gradientai/types/api_openai_api_key_info.py @@ -2,11 +2,75 @@ from typing import List, Optional from datetime import datetime +from typing_extensions import Literal from .._models import BaseModel -from .api_model import APIModel -__all__ = ["APIOpenAIAPIKeyInfo"] +__all__ = ["APIOpenAIAPIKeyInfo", "Model", "ModelAgreement", "ModelVersion"] + + +class ModelAgreement(BaseModel): + description: Optional[str] = None + + name: Optional[str] = None + + url: Optional[str] = None + + uuid: Optional[str] = None + + +class ModelVersion(BaseModel): + 
major: Optional[int] = None + + minor: Optional[int] = None + + patch: Optional[int] = None + + +class Model(BaseModel): + agreement: Optional[ModelAgreement] = None + + created_at: Optional[datetime] = None + + inference_name: Optional[str] = None + + inference_version: Optional[str] = None + + is_foundational: Optional[bool] = None + + metadata: Optional[object] = None + + name: Optional[str] = None + + parent_uuid: Optional[str] = None + + provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( + None + ) + + updated_at: Optional[datetime] = None + + upload_complete: Optional[bool] = None + + url: Optional[str] = None + + usecases: Optional[ + List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + ] = None + + uuid: Optional[str] = None + + version: Optional[ModelVersion] = None class APIOpenAIAPIKeyInfo(BaseModel): @@ -16,7 +80,7 @@ class APIOpenAIAPIKeyInfo(BaseModel): deleted_at: Optional[datetime] = None - models: Optional[List[APIModel]] = None + models: Optional[List[Model]] = None name: Optional[str] = None diff --git a/src/gradientai/types/model.py b/src/gradientai/types/model.py deleted file mode 100644 index cba51b07..00000000 --- a/src/gradientai/types/model.py +++ /dev/null @@ -1,48 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional -from datetime import datetime - -from .._models import BaseModel - -__all__ = ["Model", "Agreement", "Version"] - - -class Agreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - -class Version(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None - - -class Model(BaseModel): - agreement: Optional[Agreement] = None - - created_at: Optional[datetime] = None - - is_foundational: Optional[bool] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - version: Optional[Version] = None diff --git a/src/gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py index 93d9ae04..e6f5fad5 100644 --- a/src/gradientai/types/model_list_response.py +++ b/src/gradientai/types/model_list_response.py @@ -2,8 +2,8 @@ from typing import List, Optional -from .model import Model from .._models import BaseModel +from .api_model import APIModel from .agents.api_meta import APIMeta from .agents.api_links import APILinks @@ -15,4 +15,4 @@ class ModelListResponse(BaseModel): meta: Optional[APIMeta] = None - models: Optional[List[Model]] = None + models: Optional[List[APIModel]] = None From e59144c2d474a4003fd28b8eded08814ffa8d2f3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 20 Jun 2025 14:07:44 +0000 Subject: [PATCH 26/41] feat(api): update via SDK Studio --- .stats.yml | 2 +- README.md | 16 +- api.md | 70 ++--- src/gradientai/_client.py | 40 +-- src/gradientai/resources/__init__.py | 28 +- .../{agents => doagents}/__init__.py | 28 +- .../{agents => doagents}/api_keys.py | 12 +- .../{agents => doagents}/child_agents.py | 10 +- .../agents.py => 
doagents/doagents.py} | 218 +++++++-------- .../{agents => doagents}/functions.py | 8 +- .../{agents => doagents}/knowledge_bases.py | 4 +- .../{agents => doagents}/versions.py | 6 +- src/gradientai/types/__init__.py | 20 +- .../types/agent_update_status_response.py | 16 -- ...ate_params.py => doagent_create_params.py} | 4 +- ...response.py => doagent_create_response.py} | 4 +- ...response.py => doagent_delete_response.py} | 4 +- ..._list_params.py => doagent_list_params.py} | 4 +- ...t_response.py => doagent_list_response.py} | 8 +- ...sponse.py => doagent_retrieve_response.py} | 4 +- ...ate_params.py => doagent_update_params.py} | 4 +- ...response.py => doagent_update_response.py} | 4 +- ...ams.py => doagent_update_status_params.py} | 4 +- .../types/doagent_update_status_response.py | 16 ++ .../types/{agents => doagents}/__init__.py | 0 .../api_key_create_params.py | 0 .../api_key_create_response.py | 0 .../api_key_delete_response.py | 0 .../api_key_list_params.py | 0 .../api_key_list_response.py | 0 .../api_key_regenerate_response.py | 0 .../api_key_update_params.py | 0 .../api_key_update_response.py | 0 .../api_link_knowledge_base_output.py | 0 .../types/{agents => doagents}/api_links.py | 0 .../types/{agents => doagents}/api_meta.py | 0 .../child_agent_add_params.py | 0 .../child_agent_add_response.py | 0 .../child_agent_delete_response.py | 0 .../child_agent_update_params.py | 0 .../child_agent_update_response.py | 0 .../child_agent_view_response.py | 0 .../function_create_params.py | 0 .../function_create_response.py | 0 .../function_delete_response.py | 0 .../function_update_params.py | 0 .../function_update_response.py | 0 .../knowledge_base_detach_response.py | 0 .../version_list_params.py | 0 .../version_list_response.py | 0 .../version_update_params.py | 0 .../version_update_response.py | 0 .../types/indexing_job_list_response.py | 4 +- .../types/inference/api_key_list_response.py | 4 +- .../types/knowledge_base_list_response.py | 4 +- 
.../data_source_list_response.py | 4 +- src/gradientai/types/model_list_response.py | 4 +- .../anthropic/key_list_agents_response.py | 4 +- .../providers/anthropic/key_list_response.py | 4 +- .../providers/openai/key_list_response.py | 4 +- .../openai/key_retrieve_agents_response.py | 4 +- .../{agents => doagents}/__init__.py | 0 .../{agents => doagents}/test_api_keys.py | 106 ++++---- .../{agents => doagents}/test_child_agents.py | 86 +++--- .../{agents => doagents}/test_functions.py | 66 ++--- .../test_knowledge_bases.py | 58 ++-- .../{agents => doagents}/test_versions.py | 42 +-- .../{test_agents.py => test_doagents.py} | 256 +++++++++--------- tests/test_client.py | 20 +- 69 files changed, 602 insertions(+), 602 deletions(-) rename src/gradientai/resources/{agents => doagents}/__init__.py (84%) rename src/gradientai/resources/{agents => doagents}/api_keys.py (98%) rename src/gradientai/resources/{agents => doagents}/child_agents.py (98%) rename src/gradientai/resources/{agents/agents.py => doagents/doagents.py} (87%) rename src/gradientai/resources/{agents => doagents}/functions.py (98%) rename src/gradientai/resources/{agents => doagents}/knowledge_bases.py (98%) rename src/gradientai/resources/{agents => doagents}/versions.py (98%) delete mode 100644 src/gradientai/types/agent_update_status_response.py rename src/gradientai/types/{agent_create_params.py => doagent_create_params.py} (90%) rename src/gradientai/types/{agent_update_response.py => doagent_create_response.py} (77%) rename src/gradientai/types/{agent_retrieve_response.py => doagent_delete_response.py} (77%) rename src/gradientai/types/{agent_list_params.py => doagent_list_params.py} (79%) rename src/gradientai/types/{agent_list_response.py => doagent_list_response.py} (98%) rename src/gradientai/types/{agent_delete_response.py => doagent_retrieve_response.py} (76%) rename src/gradientai/types/{agent_update_params.py => doagent_update_params.py} (95%) rename 
src/gradientai/types/{agent_create_response.py => doagent_update_response.py} (77%) rename src/gradientai/types/{agent_update_status_params.py => doagent_update_status_params.py} (79%) create mode 100644 src/gradientai/types/doagent_update_status_response.py rename src/gradientai/types/{agents => doagents}/__init__.py (100%) rename src/gradientai/types/{agents => doagents}/api_key_create_params.py (100%) rename src/gradientai/types/{agents => doagents}/api_key_create_response.py (100%) rename src/gradientai/types/{agents => doagents}/api_key_delete_response.py (100%) rename src/gradientai/types/{agents => doagents}/api_key_list_params.py (100%) rename src/gradientai/types/{agents => doagents}/api_key_list_response.py (100%) rename src/gradientai/types/{agents => doagents}/api_key_regenerate_response.py (100%) rename src/gradientai/types/{agents => doagents}/api_key_update_params.py (100%) rename src/gradientai/types/{agents => doagents}/api_key_update_response.py (100%) rename src/gradientai/types/{agents => doagents}/api_link_knowledge_base_output.py (100%) rename src/gradientai/types/{agents => doagents}/api_links.py (100%) rename src/gradientai/types/{agents => doagents}/api_meta.py (100%) rename src/gradientai/types/{agents => doagents}/child_agent_add_params.py (100%) rename src/gradientai/types/{agents => doagents}/child_agent_add_response.py (100%) rename src/gradientai/types/{agents => doagents}/child_agent_delete_response.py (100%) rename src/gradientai/types/{agents => doagents}/child_agent_update_params.py (100%) rename src/gradientai/types/{agents => doagents}/child_agent_update_response.py (100%) rename src/gradientai/types/{agents => doagents}/child_agent_view_response.py (100%) rename src/gradientai/types/{agents => doagents}/function_create_params.py (100%) rename src/gradientai/types/{agents => doagents}/function_create_response.py (100%) rename src/gradientai/types/{agents => doagents}/function_delete_response.py (100%) rename 
src/gradientai/types/{agents => doagents}/function_update_params.py (100%) rename src/gradientai/types/{agents => doagents}/function_update_response.py (100%) rename src/gradientai/types/{agents => doagents}/knowledge_base_detach_response.py (100%) rename src/gradientai/types/{agents => doagents}/version_list_params.py (100%) rename src/gradientai/types/{agents => doagents}/version_list_response.py (100%) rename src/gradientai/types/{agents => doagents}/version_update_params.py (100%) rename src/gradientai/types/{agents => doagents}/version_update_response.py (100%) rename tests/api_resources/{agents => doagents}/__init__.py (100%) rename tests/api_resources/{agents => doagents}/test_api_keys.py (84%) rename tests/api_resources/{agents => doagents}/test_child_agents.py (84%) rename tests/api_resources/{agents => doagents}/test_functions.py (85%) rename tests/api_resources/{agents => doagents}/test_knowledge_bases.py (82%) rename tests/api_resources/{agents => doagents}/test_versions.py (84%) rename tests/api_resources/{test_agents.py => test_doagents.py} (66%) diff --git a/.stats.yml b/.stats.yml index 8f85d58c..0e1ae316 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: a2c4c23eb1e8655fbfb2b6930ce0fd46 +config_hash: c424a9395cc2b0dbf298813e54562194 diff --git a/README.md b/README.md index 36edcfbd..d047f658 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ client = GradientAI( ), # This is the default and can be omitted ) -versions = client.agents.versions.list( +versions = client.doagents.versions.list( uuid="REPLACE_ME", ) print(versions.agent_versions) @@ -61,7 +61,7 @@ client = AsyncGradientAI( async def main() -> None: - versions = await client.agents.versions.list( + versions 
= await client.doagents.versions.list( uuid="REPLACE_ME", ) print(versions.agent_versions) @@ -113,7 +113,7 @@ from gradientai import GradientAI client = GradientAI() try: - client.agents.versions.list( + client.doagents.versions.list( uuid="REPLACE_ME", ) except gradientai.APIConnectionError as e: @@ -158,7 +158,7 @@ client = GradientAI( ) # Or, configure per-request: -client.with_options(max_retries=5).agents.versions.list( +client.with_options(max_retries=5).doagents.versions.list( uuid="REPLACE_ME", ) ``` @@ -183,7 +183,7 @@ client = GradientAI( ) # Override per-request: -client.with_options(timeout=5.0).agents.versions.list( +client.with_options(timeout=5.0).doagents.versions.list( uuid="REPLACE_ME", ) ``` @@ -226,12 +226,12 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to from gradientai import GradientAI client = GradientAI() -response = client.agents.versions.with_raw_response.list( +response = client.doagents.versions.with_raw_response.list( uuid="REPLACE_ME", ) print(response.headers.get('X-My-Header')) -version = response.parse() # get the object that `agents.versions.list()` would have returned +version = response.parse() # get the object that `doagents.versions.list()` would have returned print(version.agent_versions) ``` @@ -246,7 +246,7 @@ The above interface eagerly reads the full response body when you make the reque To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods. 
```python -with client.agents.versions.with_streaming_response.list( +with client.doagents.versions.with_streaming_response.list( uuid="REPLACE_ME", ) as response: print(response.headers.get("X-My-Header")) diff --git a/api.md b/api.md index 2376a11f..0bc41bbe 100644 --- a/api.md +++ b/api.md @@ -1,4 +1,4 @@ -# Agents +# Doagents Types: @@ -10,30 +10,30 @@ from gradientai.types import ( APIDeploymentVisibility, APIOpenAIAPIKeyInfo, APIRetrievalMethod, - AgentCreateResponse, - AgentRetrieveResponse, - AgentUpdateResponse, - AgentListResponse, - AgentDeleteResponse, - AgentUpdateStatusResponse, + DoagentCreateResponse, + DoagentRetrieveResponse, + DoagentUpdateResponse, + DoagentListResponse, + DoagentDeleteResponse, + DoagentUpdateStatusResponse, ) ``` Methods: -- client.agents.create(\*\*params) -> AgentCreateResponse -- client.agents.retrieve(uuid) -> AgentRetrieveResponse -- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse -- client.agents.list(\*\*params) -> AgentListResponse -- client.agents.delete(uuid) -> AgentDeleteResponse -- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse +- client.doagents.create(\*\*params) -> DoagentCreateResponse +- client.doagents.retrieve(uuid) -> DoagentRetrieveResponse +- client.doagents.update(path_uuid, \*\*params) -> DoagentUpdateResponse +- client.doagents.list(\*\*params) -> DoagentListResponse +- client.doagents.delete(uuid) -> DoagentDeleteResponse +- client.doagents.update_status(path_uuid, \*\*params) -> DoagentUpdateStatusResponse ## APIKeys Types: ```python -from gradientai.types.agents import ( +from gradientai.types.doagents import ( APIKeyCreateResponse, APIKeyUpdateResponse, APIKeyListResponse, @@ -44,18 +44,18 @@ from gradientai.types.agents import ( Methods: -- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse -- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse -- 
client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse -- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse -- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse +- client.doagents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse +- client.doagents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse +- client.doagents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse +- client.doagents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse +- client.doagents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse ## Functions Types: ```python -from gradientai.types.agents import ( +from gradientai.types.doagents import ( FunctionCreateResponse, FunctionUpdateResponse, FunctionDeleteResponse, @@ -64,43 +64,43 @@ from gradientai.types.agents import ( Methods: -- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse -- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse -- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse +- client.doagents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse +- client.doagents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse +- client.doagents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse ## Versions Types: ```python -from gradientai.types.agents import APILinks, APIMeta, VersionUpdateResponse, VersionListResponse +from gradientai.types.doagents import APILinks, APIMeta, VersionUpdateResponse, VersionListResponse ``` Methods: -- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse -- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse +- 
client.doagents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse +- client.doagents.versions.list(uuid, \*\*params) -> VersionListResponse ## KnowledgeBases Types: ```python -from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse +from gradientai.types.doagents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse ``` Methods: -- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput -- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput -- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse +- client.doagents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput +- client.doagents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput +- client.doagents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse ## ChildAgents Types: ```python -from gradientai.types.agents import ( +from gradientai.types.doagents import ( ChildAgentUpdateResponse, ChildAgentDeleteResponse, ChildAgentAddResponse, @@ -110,10 +110,10 @@ from gradientai.types.agents import ( Methods: -- client.agents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse -- client.agents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse -- client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse -- client.agents.child_agents.view(uuid) -> ChildAgentViewResponse +- client.doagents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse +- client.doagents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse +- client.doagents.child_agents.add(path_child_agent_uuid, \*, 
path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse +- client.doagents.child_agents.view(uuid) -> ChildAgentViewResponse # Providers diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 0a5eb9a1..992559a2 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -31,12 +31,12 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases + from .resources import chat, models, regions, doagents, inference, providers, indexing_jobs, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.chat.chat import ChatResource, AsyncChatResource - from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource + from .resources.doagents.doagents import DoagentsResource, AsyncDoagentsResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource from .resources.providers.providers import ProvidersResource, AsyncProvidersResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource @@ -110,10 +110,10 @@ def __init__( ) @cached_property - def agents(self) -> AgentsResource: - from .resources.agents import AgentsResource + def doagents(self) -> DoagentsResource: + from .resources.doagents import DoagentsResource - return AgentsResource(self) + return DoagentsResource(self) @cached_property def providers(self) -> ProvidersResource: @@ -329,10 +329,10 @@ def __init__( ) @cached_property - def agents(self) -> AsyncAgentsResource: - from .resources.agents import AsyncAgentsResource + def doagents(self) -> AsyncDoagentsResource: + from .resources.doagents import AsyncDoagentsResource - return AsyncAgentsResource(self) + return AsyncDoagentsResource(self) @cached_property def 
providers(self) -> AsyncProvidersResource: @@ -498,10 +498,10 @@ def __init__(self, client: GradientAI) -> None: self._client = client @cached_property - def agents(self) -> agents.AgentsResourceWithRawResponse: - from .resources.agents import AgentsResourceWithRawResponse + def doagents(self) -> doagents.DoagentsResourceWithRawResponse: + from .resources.doagents import DoagentsResourceWithRawResponse - return AgentsResourceWithRawResponse(self._client.agents) + return DoagentsResourceWithRawResponse(self._client.doagents) @cached_property def providers(self) -> providers.ProvidersResourceWithRawResponse: @@ -553,10 +553,10 @@ def __init__(self, client: AsyncGradientAI) -> None: self._client = client @cached_property - def agents(self) -> agents.AsyncAgentsResourceWithRawResponse: - from .resources.agents import AsyncAgentsResourceWithRawResponse + def doagents(self) -> doagents.AsyncDoagentsResourceWithRawResponse: + from .resources.doagents import AsyncDoagentsResourceWithRawResponse - return AsyncAgentsResourceWithRawResponse(self._client.agents) + return AsyncDoagentsResourceWithRawResponse(self._client.doagents) @cached_property def providers(self) -> providers.AsyncProvidersResourceWithRawResponse: @@ -608,10 +608,10 @@ def __init__(self, client: GradientAI) -> None: self._client = client @cached_property - def agents(self) -> agents.AgentsResourceWithStreamingResponse: - from .resources.agents import AgentsResourceWithStreamingResponse + def doagents(self) -> doagents.DoagentsResourceWithStreamingResponse: + from .resources.doagents import DoagentsResourceWithStreamingResponse - return AgentsResourceWithStreamingResponse(self._client.agents) + return DoagentsResourceWithStreamingResponse(self._client.doagents) @cached_property def providers(self) -> providers.ProvidersResourceWithStreamingResponse: @@ -663,10 +663,10 @@ def __init__(self, client: AsyncGradientAI) -> None: self._client = client @cached_property - def agents(self) -> 
agents.AsyncAgentsResourceWithStreamingResponse: - from .resources.agents import AsyncAgentsResourceWithStreamingResponse + def doagents(self) -> doagents.AsyncDoagentsResourceWithStreamingResponse: + from .resources.doagents import AsyncDoagentsResourceWithStreamingResponse - return AsyncAgentsResourceWithStreamingResponse(self._client.agents) + return AsyncDoagentsResourceWithStreamingResponse(self._client.doagents) @cached_property def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse: diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 1763a13e..17791967 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -8,14 +8,6 @@ ChatResourceWithStreamingResponse, AsyncChatResourceWithStreamingResponse, ) -from .agents import ( - AgentsResource, - AsyncAgentsResource, - AgentsResourceWithRawResponse, - AsyncAgentsResourceWithRawResponse, - AgentsResourceWithStreamingResponse, - AsyncAgentsResourceWithStreamingResponse, -) from .models import ( ModelsResource, AsyncModelsResource, @@ -32,6 +24,14 @@ RegionsResourceWithStreamingResponse, AsyncRegionsResourceWithStreamingResponse, ) +from .doagents import ( + DoagentsResource, + AsyncDoagentsResource, + DoagentsResourceWithRawResponse, + AsyncDoagentsResourceWithRawResponse, + DoagentsResourceWithStreamingResponse, + AsyncDoagentsResourceWithStreamingResponse, +) from .inference import ( InferenceResource, AsyncInferenceResource, @@ -66,12 +66,12 @@ ) __all__ = [ - "AgentsResource", - "AsyncAgentsResource", - "AgentsResourceWithRawResponse", - "AsyncAgentsResourceWithRawResponse", - "AgentsResourceWithStreamingResponse", - "AsyncAgentsResourceWithStreamingResponse", + "DoagentsResource", + "AsyncDoagentsResource", + "DoagentsResourceWithRawResponse", + "AsyncDoagentsResourceWithRawResponse", + "DoagentsResourceWithStreamingResponse", + "AsyncDoagentsResourceWithStreamingResponse", "ProvidersResource", 
"AsyncProvidersResource", "ProvidersResourceWithRawResponse", diff --git a/src/gradientai/resources/agents/__init__.py b/src/gradientai/resources/doagents/__init__.py similarity index 84% rename from src/gradientai/resources/agents/__init__.py rename to src/gradientai/resources/doagents/__init__.py index f41a0408..5ee3485f 100644 --- a/src/gradientai/resources/agents/__init__.py +++ b/src/gradientai/resources/doagents/__init__.py @@ -1,13 +1,5 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from .agents import ( - AgentsResource, - AsyncAgentsResource, - AgentsResourceWithRawResponse, - AsyncAgentsResourceWithRawResponse, - AgentsResourceWithStreamingResponse, - AsyncAgentsResourceWithStreamingResponse, -) from .api_keys import ( APIKeysResource, AsyncAPIKeysResource, @@ -16,6 +8,14 @@ APIKeysResourceWithStreamingResponse, AsyncAPIKeysResourceWithStreamingResponse, ) +from .doagents import ( + DoagentsResource, + AsyncDoagentsResource, + DoagentsResourceWithRawResponse, + AsyncDoagentsResourceWithRawResponse, + DoagentsResourceWithStreamingResponse, + AsyncDoagentsResourceWithStreamingResponse, +) from .versions import ( VersionsResource, AsyncVersionsResource, @@ -80,10 +80,10 @@ "AsyncChildAgentsResourceWithRawResponse", "ChildAgentsResourceWithStreamingResponse", "AsyncChildAgentsResourceWithStreamingResponse", - "AgentsResource", - "AsyncAgentsResource", - "AgentsResourceWithRawResponse", - "AsyncAgentsResourceWithRawResponse", - "AgentsResourceWithStreamingResponse", - "AsyncAgentsResourceWithStreamingResponse", + "DoagentsResource", + "AsyncDoagentsResource", + "DoagentsResourceWithRawResponse", + "AsyncDoagentsResourceWithRawResponse", + "DoagentsResourceWithStreamingResponse", + "AsyncDoagentsResourceWithStreamingResponse", ] diff --git a/src/gradientai/resources/agents/api_keys.py b/src/gradientai/resources/doagents/api_keys.py similarity index 98% rename from src/gradientai/resources/agents/api_keys.py rename to 
src/gradientai/resources/doagents/api_keys.py index 155e3adc..c55249be 100644 --- a/src/gradientai/resources/agents/api_keys.py +++ b/src/gradientai/resources/doagents/api_keys.py @@ -15,12 +15,12 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.agents import api_key_list_params, api_key_create_params, api_key_update_params -from ...types.agents.api_key_list_response import APIKeyListResponse -from ...types.agents.api_key_create_response import APIKeyCreateResponse -from ...types.agents.api_key_delete_response import APIKeyDeleteResponse -from ...types.agents.api_key_update_response import APIKeyUpdateResponse -from ...types.agents.api_key_regenerate_response import APIKeyRegenerateResponse +from ...types.doagents import api_key_list_params, api_key_create_params, api_key_update_params +from ...types.doagents.api_key_list_response import APIKeyListResponse +from ...types.doagents.api_key_create_response import APIKeyCreateResponse +from ...types.doagents.api_key_delete_response import APIKeyDeleteResponse +from ...types.doagents.api_key_update_response import APIKeyUpdateResponse +from ...types.doagents.api_key_regenerate_response import APIKeyRegenerateResponse __all__ = ["APIKeysResource", "AsyncAPIKeysResource"] diff --git a/src/gradientai/resources/agents/child_agents.py b/src/gradientai/resources/doagents/child_agents.py similarity index 98% rename from src/gradientai/resources/agents/child_agents.py rename to src/gradientai/resources/doagents/child_agents.py index 9031d8ce..6e8abfb7 100644 --- a/src/gradientai/resources/agents/child_agents.py +++ b/src/gradientai/resources/doagents/child_agents.py @@ -15,11 +15,11 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.agents import child_agent_add_params, child_agent_update_params -from ...types.agents.child_agent_add_response import ChildAgentAddResponse -from ...types.agents.child_agent_view_response 
import ChildAgentViewResponse -from ...types.agents.child_agent_delete_response import ChildAgentDeleteResponse -from ...types.agents.child_agent_update_response import ChildAgentUpdateResponse +from ...types.doagents import child_agent_add_params, child_agent_update_params +from ...types.doagents.child_agent_add_response import ChildAgentAddResponse +from ...types.doagents.child_agent_view_response import ChildAgentViewResponse +from ...types.doagents.child_agent_delete_response import ChildAgentDeleteResponse +from ...types.doagents.child_agent_update_response import ChildAgentUpdateResponse __all__ = ["ChildAgentsResource", "AsyncChildAgentsResource"] diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/doagents/doagents.py similarity index 87% rename from src/gradientai/resources/agents/agents.py rename to src/gradientai/resources/doagents/doagents.py index 78439d33..89951704 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/doagents/doagents.py @@ -9,10 +9,10 @@ from ...types import ( APIRetrievalMethod, APIDeploymentVisibility, - agent_list_params, - agent_create_params, - agent_update_params, - agent_update_status_params, + doagent_list_params, + doagent_create_params, + doagent_update_params, + doagent_update_status_params, ) from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ..._utils import maybe_transform, async_maybe_transform @@ -65,19 +65,19 @@ KnowledgeBasesResourceWithStreamingResponse, AsyncKnowledgeBasesResourceWithStreamingResponse, ) -from ...types.agent_list_response import AgentListResponse from ...types.api_retrieval_method import APIRetrievalMethod -from ...types.agent_create_response import AgentCreateResponse -from ...types.agent_delete_response import AgentDeleteResponse -from ...types.agent_update_response import AgentUpdateResponse -from ...types.agent_retrieve_response import AgentRetrieveResponse +from ...types.doagent_list_response import 
DoagentListResponse +from ...types.doagent_create_response import DoagentCreateResponse +from ...types.doagent_delete_response import DoagentDeleteResponse +from ...types.doagent_update_response import DoagentUpdateResponse from ...types.api_deployment_visibility import APIDeploymentVisibility -from ...types.agent_update_status_response import AgentUpdateStatusResponse +from ...types.doagent_retrieve_response import DoagentRetrieveResponse +from ...types.doagent_update_status_response import DoagentUpdateStatusResponse -__all__ = ["AgentsResource", "AsyncAgentsResource"] +__all__ = ["DoagentsResource", "AsyncDoagentsResource"] -class AgentsResource(SyncAPIResource): +class DoagentsResource(SyncAPIResource): @cached_property def api_keys(self) -> APIKeysResource: return APIKeysResource(self._client) @@ -99,23 +99,23 @@ def child_agents(self) -> ChildAgentsResource: return ChildAgentsResource(self._client) @cached_property - def with_raw_response(self) -> AgentsResourceWithRawResponse: + def with_raw_response(self) -> DoagentsResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ - return AgentsResourceWithRawResponse(self) + return DoagentsResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: + def with_streaming_response(self) -> DoagentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ - return AgentsResourceWithStreamingResponse(self) + return DoagentsResourceWithStreamingResponse(self) def create( self, @@ -136,7 +136,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentCreateResponse: + ) -> DoagentCreateResponse: """To create a new agent, send a POST request to `/v2/gen-ai/agents`. The response @@ -175,12 +175,12 @@ def create( "region": region, "tags": tags, }, - agent_create_params.AgentCreateParams, + doagent_create_params.DoagentCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=AgentCreateResponse, + cast_to=DoagentCreateResponse, ) def retrieve( @@ -193,7 +193,7 @@ def retrieve( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentRetrieveResponse: + ) -> DoagentRetrieveResponse: """To retrieve details of an agent, GET request to `/v2/gen-ai/agents/{uuid}`. The @@ -217,7 +217,7 @@ def retrieve( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=AgentRetrieveResponse, + cast_to=DoagentRetrieveResponse, ) def update( @@ -245,7 +245,7 @@ def update( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentUpdateResponse: + ) -> DoagentUpdateResponse: """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. 
The @@ -303,12 +303,12 @@ def update( "top_p": top_p, "body_uuid": body_uuid, }, - agent_update_params.AgentUpdateParams, + doagent_update_params.DoagentUpdateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=AgentUpdateResponse, + cast_to=DoagentUpdateResponse, ) def list( @@ -323,7 +323,7 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentListResponse: + ) -> DoagentListResponse: """ To list all agents, send a GET request to `/v2/gen-ai/agents`. @@ -357,10 +357,10 @@ def list( "page": page, "per_page": per_page, }, - agent_list_params.AgentListParams, + doagent_list_params.DoagentListParams, ), ), - cast_to=AgentListResponse, + cast_to=DoagentListResponse, ) def delete( @@ -373,7 +373,7 @@ def delete( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentDeleteResponse: + ) -> DoagentDeleteResponse: """ To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. @@ -395,7 +395,7 @@ def delete( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=AgentDeleteResponse, + cast_to=DoagentDeleteResponse, ) def update_status( @@ -410,7 +410,7 @@ def update_status( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentUpdateStatusResponse: + ) -> DoagentUpdateStatusResponse: """Check whether an agent is public or private. 
To update the agent status, send a @@ -436,16 +436,16 @@ def update_status( "body_uuid": body_uuid, "visibility": visibility, }, - agent_update_status_params.AgentUpdateStatusParams, + doagent_update_status_params.DoagentUpdateStatusParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=AgentUpdateStatusResponse, + cast_to=DoagentUpdateStatusResponse, ) -class AsyncAgentsResource(AsyncAPIResource): +class AsyncDoagentsResource(AsyncAPIResource): @cached_property def api_keys(self) -> AsyncAPIKeysResource: return AsyncAPIKeysResource(self._client) @@ -467,23 +467,23 @@ def child_agents(self) -> AsyncChildAgentsResource: return AsyncChildAgentsResource(self._client) @cached_property - def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: + def with_raw_response(self) -> AsyncDoagentsResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ - return AsyncAgentsResourceWithRawResponse(self) + return AsyncDoagentsResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: + def with_streaming_response(self) -> AsyncDoagentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ - return AsyncAgentsResourceWithStreamingResponse(self) + return AsyncDoagentsResourceWithStreamingResponse(self) async def create( self, @@ -504,7 +504,7 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentCreateResponse: + ) -> DoagentCreateResponse: """To create a new agent, send a POST request to `/v2/gen-ai/agents`. The response @@ -543,12 +543,12 @@ async def create( "region": region, "tags": tags, }, - agent_create_params.AgentCreateParams, + doagent_create_params.DoagentCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=AgentCreateResponse, + cast_to=DoagentCreateResponse, ) async def retrieve( @@ -561,7 +561,7 @@ async def retrieve( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentRetrieveResponse: + ) -> DoagentRetrieveResponse: """To retrieve details of an agent, GET request to `/v2/gen-ai/agents/{uuid}`. The @@ -585,7 +585,7 @@ async def retrieve( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=AgentRetrieveResponse, + cast_to=DoagentRetrieveResponse, ) async def update( @@ -613,7 +613,7 @@ async def update( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentUpdateResponse: + ) -> DoagentUpdateResponse: """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. 
The @@ -671,12 +671,12 @@ async def update( "top_p": top_p, "body_uuid": body_uuid, }, - agent_update_params.AgentUpdateParams, + doagent_update_params.DoagentUpdateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=AgentUpdateResponse, + cast_to=DoagentUpdateResponse, ) async def list( @@ -691,7 +691,7 @@ async def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentListResponse: + ) -> DoagentListResponse: """ To list all agents, send a GET request to `/v2/gen-ai/agents`. @@ -725,10 +725,10 @@ async def list( "page": page, "per_page": per_page, }, - agent_list_params.AgentListParams, + doagent_list_params.DoagentListParams, ), ), - cast_to=AgentListResponse, + cast_to=DoagentListResponse, ) async def delete( @@ -741,7 +741,7 @@ async def delete( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentDeleteResponse: + ) -> DoagentDeleteResponse: """ To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. @@ -763,7 +763,7 @@ async def delete( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=AgentDeleteResponse, + cast_to=DoagentDeleteResponse, ) async def update_status( @@ -778,7 +778,7 @@ async def update_status( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AgentUpdateStatusResponse: + ) -> DoagentUpdateStatusResponse: """Check whether an agent is public or private. 
To update the agent status, send a @@ -804,186 +804,186 @@ async def update_status( "body_uuid": body_uuid, "visibility": visibility, }, - agent_update_status_params.AgentUpdateStatusParams, + doagent_update_status_params.DoagentUpdateStatusParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=AgentUpdateStatusResponse, + cast_to=DoagentUpdateStatusResponse, ) -class AgentsResourceWithRawResponse: - def __init__(self, agents: AgentsResource) -> None: - self._agents = agents +class DoagentsResourceWithRawResponse: + def __init__(self, doagents: DoagentsResource) -> None: + self._doagents = doagents self.create = to_raw_response_wrapper( - agents.create, + doagents.create, ) self.retrieve = to_raw_response_wrapper( - agents.retrieve, + doagents.retrieve, ) self.update = to_raw_response_wrapper( - agents.update, + doagents.update, ) self.list = to_raw_response_wrapper( - agents.list, + doagents.list, ) self.delete = to_raw_response_wrapper( - agents.delete, + doagents.delete, ) self.update_status = to_raw_response_wrapper( - agents.update_status, + doagents.update_status, ) @cached_property def api_keys(self) -> APIKeysResourceWithRawResponse: - return APIKeysResourceWithRawResponse(self._agents.api_keys) + return APIKeysResourceWithRawResponse(self._doagents.api_keys) @cached_property def functions(self) -> FunctionsResourceWithRawResponse: - return FunctionsResourceWithRawResponse(self._agents.functions) + return FunctionsResourceWithRawResponse(self._doagents.functions) @cached_property def versions(self) -> VersionsResourceWithRawResponse: - return VersionsResourceWithRawResponse(self._agents.versions) + return VersionsResourceWithRawResponse(self._doagents.versions) @cached_property def knowledge_bases(self) -> KnowledgeBasesResourceWithRawResponse: - return KnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) + return 
KnowledgeBasesResourceWithRawResponse(self._doagents.knowledge_bases) @cached_property def child_agents(self) -> ChildAgentsResourceWithRawResponse: - return ChildAgentsResourceWithRawResponse(self._agents.child_agents) + return ChildAgentsResourceWithRawResponse(self._doagents.child_agents) -class AsyncAgentsResourceWithRawResponse: - def __init__(self, agents: AsyncAgentsResource) -> None: - self._agents = agents +class AsyncDoagentsResourceWithRawResponse: + def __init__(self, doagents: AsyncDoagentsResource) -> None: + self._doagents = doagents self.create = async_to_raw_response_wrapper( - agents.create, + doagents.create, ) self.retrieve = async_to_raw_response_wrapper( - agents.retrieve, + doagents.retrieve, ) self.update = async_to_raw_response_wrapper( - agents.update, + doagents.update, ) self.list = async_to_raw_response_wrapper( - agents.list, + doagents.list, ) self.delete = async_to_raw_response_wrapper( - agents.delete, + doagents.delete, ) self.update_status = async_to_raw_response_wrapper( - agents.update_status, + doagents.update_status, ) @cached_property def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: - return AsyncAPIKeysResourceWithRawResponse(self._agents.api_keys) + return AsyncAPIKeysResourceWithRawResponse(self._doagents.api_keys) @cached_property def functions(self) -> AsyncFunctionsResourceWithRawResponse: - return AsyncFunctionsResourceWithRawResponse(self._agents.functions) + return AsyncFunctionsResourceWithRawResponse(self._doagents.functions) @cached_property def versions(self) -> AsyncVersionsResourceWithRawResponse: - return AsyncVersionsResourceWithRawResponse(self._agents.versions) + return AsyncVersionsResourceWithRawResponse(self._doagents.versions) @cached_property def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithRawResponse: - return AsyncKnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) + return AsyncKnowledgeBasesResourceWithRawResponse(self._doagents.knowledge_bases) 
@cached_property def child_agents(self) -> AsyncChildAgentsResourceWithRawResponse: - return AsyncChildAgentsResourceWithRawResponse(self._agents.child_agents) + return AsyncChildAgentsResourceWithRawResponse(self._doagents.child_agents) -class AgentsResourceWithStreamingResponse: - def __init__(self, agents: AgentsResource) -> None: - self._agents = agents +class DoagentsResourceWithStreamingResponse: + def __init__(self, doagents: DoagentsResource) -> None: + self._doagents = doagents self.create = to_streamed_response_wrapper( - agents.create, + doagents.create, ) self.retrieve = to_streamed_response_wrapper( - agents.retrieve, + doagents.retrieve, ) self.update = to_streamed_response_wrapper( - agents.update, + doagents.update, ) self.list = to_streamed_response_wrapper( - agents.list, + doagents.list, ) self.delete = to_streamed_response_wrapper( - agents.delete, + doagents.delete, ) self.update_status = to_streamed_response_wrapper( - agents.update_status, + doagents.update_status, ) @cached_property def api_keys(self) -> APIKeysResourceWithStreamingResponse: - return APIKeysResourceWithStreamingResponse(self._agents.api_keys) + return APIKeysResourceWithStreamingResponse(self._doagents.api_keys) @cached_property def functions(self) -> FunctionsResourceWithStreamingResponse: - return FunctionsResourceWithStreamingResponse(self._agents.functions) + return FunctionsResourceWithStreamingResponse(self._doagents.functions) @cached_property def versions(self) -> VersionsResourceWithStreamingResponse: - return VersionsResourceWithStreamingResponse(self._agents.versions) + return VersionsResourceWithStreamingResponse(self._doagents.versions) @cached_property def knowledge_bases(self) -> KnowledgeBasesResourceWithStreamingResponse: - return KnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) + return KnowledgeBasesResourceWithStreamingResponse(self._doagents.knowledge_bases) @cached_property def child_agents(self) -> 
ChildAgentsResourceWithStreamingResponse: - return ChildAgentsResourceWithStreamingResponse(self._agents.child_agents) + return ChildAgentsResourceWithStreamingResponse(self._doagents.child_agents) -class AsyncAgentsResourceWithStreamingResponse: - def __init__(self, agents: AsyncAgentsResource) -> None: - self._agents = agents +class AsyncDoagentsResourceWithStreamingResponse: + def __init__(self, doagents: AsyncDoagentsResource) -> None: + self._doagents = doagents self.create = async_to_streamed_response_wrapper( - agents.create, + doagents.create, ) self.retrieve = async_to_streamed_response_wrapper( - agents.retrieve, + doagents.retrieve, ) self.update = async_to_streamed_response_wrapper( - agents.update, + doagents.update, ) self.list = async_to_streamed_response_wrapper( - agents.list, + doagents.list, ) self.delete = async_to_streamed_response_wrapper( - agents.delete, + doagents.delete, ) self.update_status = async_to_streamed_response_wrapper( - agents.update_status, + doagents.update_status, ) @cached_property def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: - return AsyncAPIKeysResourceWithStreamingResponse(self._agents.api_keys) + return AsyncAPIKeysResourceWithStreamingResponse(self._doagents.api_keys) @cached_property def functions(self) -> AsyncFunctionsResourceWithStreamingResponse: - return AsyncFunctionsResourceWithStreamingResponse(self._agents.functions) + return AsyncFunctionsResourceWithStreamingResponse(self._doagents.functions) @cached_property def versions(self) -> AsyncVersionsResourceWithStreamingResponse: - return AsyncVersionsResourceWithStreamingResponse(self._agents.versions) + return AsyncVersionsResourceWithStreamingResponse(self._doagents.versions) @cached_property def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: - return AsyncKnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) + return 
AsyncKnowledgeBasesResourceWithStreamingResponse(self._doagents.knowledge_bases) @cached_property def child_agents(self) -> AsyncChildAgentsResourceWithStreamingResponse: - return AsyncChildAgentsResourceWithStreamingResponse(self._agents.child_agents) + return AsyncChildAgentsResourceWithStreamingResponse(self._doagents.child_agents) diff --git a/src/gradientai/resources/agents/functions.py b/src/gradientai/resources/doagents/functions.py similarity index 98% rename from src/gradientai/resources/agents/functions.py rename to src/gradientai/resources/doagents/functions.py index 67a811cc..65ab2801 100644 --- a/src/gradientai/resources/agents/functions.py +++ b/src/gradientai/resources/doagents/functions.py @@ -15,10 +15,10 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.agents import function_create_params, function_update_params -from ...types.agents.function_create_response import FunctionCreateResponse -from ...types.agents.function_delete_response import FunctionDeleteResponse -from ...types.agents.function_update_response import FunctionUpdateResponse +from ...types.doagents import function_create_params, function_update_params +from ...types.doagents.function_create_response import FunctionCreateResponse +from ...types.doagents.function_delete_response import FunctionDeleteResponse +from ...types.doagents.function_update_response import FunctionUpdateResponse __all__ = ["FunctionsResource", "AsyncFunctionsResource"] diff --git a/src/gradientai/resources/agents/knowledge_bases.py b/src/gradientai/resources/doagents/knowledge_bases.py similarity index 98% rename from src/gradientai/resources/agents/knowledge_bases.py rename to src/gradientai/resources/doagents/knowledge_bases.py index 3b9b0cd2..e806d7a2 100644 --- a/src/gradientai/resources/agents/knowledge_bases.py +++ b/src/gradientai/resources/doagents/knowledge_bases.py @@ -14,8 +14,8 @@ async_to_streamed_response_wrapper, ) from ..._base_client import 
make_request_options -from ...types.agents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput -from ...types.agents.knowledge_base_detach_response import KnowledgeBaseDetachResponse +from ...types.doagents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput +from ...types.doagents.knowledge_base_detach_response import KnowledgeBaseDetachResponse __all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"] diff --git a/src/gradientai/resources/agents/versions.py b/src/gradientai/resources/doagents/versions.py similarity index 98% rename from src/gradientai/resources/agents/versions.py rename to src/gradientai/resources/doagents/versions.py index 86dbf99f..6301bc0a 100644 --- a/src/gradientai/resources/agents/versions.py +++ b/src/gradientai/resources/doagents/versions.py @@ -15,9 +15,9 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.agents import version_list_params, version_update_params -from ...types.agents.version_list_response import VersionListResponse -from ...types.agents.version_update_response import VersionUpdateResponse +from ...types.doagents import version_list_params, version_update_params +from ...types.doagents.version_list_response import VersionListResponse +from ...types.doagents.version_update_response import VersionUpdateResponse __all__ = ["VersionsResource", "AsyncVersionsResource"] diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index 5ee961c6..09d071f0 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -5,34 +5,34 @@ from .api_agent import APIAgent as APIAgent from .api_model import APIModel as APIModel from .api_indexing_job import APIIndexingJob as APIIndexingJob -from .agent_list_params import AgentListParams as AgentListParams from .model_list_params import ModelListParams as ModelListParams from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase from 
.region_list_params import RegionListParams as RegionListParams -from .agent_create_params import AgentCreateParams as AgentCreateParams -from .agent_list_response import AgentListResponse as AgentListResponse -from .agent_update_params import AgentUpdateParams as AgentUpdateParams +from .doagent_list_params import DoagentListParams as DoagentListParams from .model_list_response import ModelListResponse as ModelListResponse from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod from .region_list_response import RegionListResponse as RegionListResponse -from .agent_create_response import AgentCreateResponse as AgentCreateResponse -from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse -from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse +from .doagent_create_params import DoagentCreateParams as DoagentCreateParams +from .doagent_list_response import DoagentListResponse as DoagentListResponse +from .doagent_update_params import DoagentUpdateParams as DoagentUpdateParams from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo -from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo +from .doagent_create_response import DoagentCreateResponse as DoagentCreateResponse +from .doagent_delete_response import DoagentDeleteResponse as DoagentDeleteResponse +from .doagent_update_response import DoagentUpdateResponse as DoagentUpdateResponse from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility -from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams +from .doagent_retrieve_response import DoagentRetrieveResponse as DoagentRetrieveResponse from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo 
from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams -from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse +from .doagent_update_status_params import DoagentUpdateStatusParams as DoagentUpdateStatusParams from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams +from .doagent_update_status_response import DoagentUpdateStatusResponse as DoagentUpdateStatusResponse from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse diff --git a/src/gradientai/types/agent_update_status_response.py b/src/gradientai/types/agent_update_status_response.py deleted file mode 100644 index b200f99d..00000000 --- a/src/gradientai/types/agent_update_status_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["AgentUpdateStatusResponse"] - - -class AgentUpdateStatusResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_create_params.py b/src/gradientai/types/doagent_create_params.py similarity index 90% rename from src/gradientai/types/agent_create_params.py rename to src/gradientai/types/doagent_create_params.py index 58b99df7..b5b6e72d 100644 --- a/src/gradientai/types/agent_create_params.py +++ b/src/gradientai/types/doagent_create_params.py @@ -7,10 +7,10 @@ from .._utils import PropertyInfo -__all__ = ["AgentCreateParams"] +__all__ = ["DoagentCreateParams"] -class AgentCreateParams(TypedDict, total=False): +class DoagentCreateParams(TypedDict, total=False): anthropic_key_uuid: str description: str diff --git a/src/gradientai/types/agent_update_response.py b/src/gradientai/types/doagent_create_response.py similarity index 77% rename from src/gradientai/types/agent_update_response.py rename to src/gradientai/types/doagent_create_response.py index 2948aa1c..2d171436 100644 --- a/src/gradientai/types/agent_update_response.py +++ b/src/gradientai/types/doagent_create_response.py @@ -6,10 +6,10 @@ from .._models import BaseModel -__all__ = ["AgentUpdateResponse"] +__all__ = ["DoagentCreateResponse"] -class AgentUpdateResponse(BaseModel): +class DoagentCreateResponse(BaseModel): agent: Optional["APIAgent"] = None diff --git a/src/gradientai/types/agent_retrieve_response.py b/src/gradientai/types/doagent_delete_response.py similarity index 77% rename from src/gradientai/types/agent_retrieve_response.py rename to src/gradientai/types/doagent_delete_response.py index 2eed88af..5d90ba17 100644 --- a/src/gradientai/types/agent_retrieve_response.py +++ b/src/gradientai/types/doagent_delete_response.py @@ -6,10 +6,10 @@ from .._models import BaseModel -__all__ = 
["AgentRetrieveResponse"] +__all__ = ["DoagentDeleteResponse"] -class AgentRetrieveResponse(BaseModel): +class DoagentDeleteResponse(BaseModel): agent: Optional["APIAgent"] = None diff --git a/src/gradientai/types/agent_list_params.py b/src/gradientai/types/doagent_list_params.py similarity index 79% rename from src/gradientai/types/agent_list_params.py rename to src/gradientai/types/doagent_list_params.py index e13a10c9..a9b3fb2b 100644 --- a/src/gradientai/types/agent_list_params.py +++ b/src/gradientai/types/doagent_list_params.py @@ -4,10 +4,10 @@ from typing_extensions import TypedDict -__all__ = ["AgentListParams"] +__all__ = ["DoagentListParams"] -class AgentListParams(TypedDict, total=False): +class DoagentListParams(TypedDict, total=False): only_deployed: bool """only list agents that are deployed.""" diff --git a/src/gradientai/types/agent_list_response.py b/src/gradientai/types/doagent_list_response.py similarity index 98% rename from src/gradientai/types/agent_list_response.py rename to src/gradientai/types/doagent_list_response.py index 6af9cd51..65c2b076 100644 --- a/src/gradientai/types/agent_list_response.py +++ b/src/gradientai/types/doagent_list_response.py @@ -5,14 +5,14 @@ from typing_extensions import Literal from .._models import BaseModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks +from .doagents.api_meta import APIMeta from .api_knowledge_base import APIKnowledgeBase +from .doagents.api_links import APILinks from .api_retrieval_method import APIRetrievalMethod from .api_deployment_visibility import APIDeploymentVisibility __all__ = [ - "AgentListResponse", + "DoagentListResponse", "Agent", "AgentChatbot", "AgentChatbotIdentifier", @@ -323,7 +323,7 @@ class Agent(BaseModel): uuid: Optional[str] = None -class AgentListResponse(BaseModel): +class DoagentListResponse(BaseModel): agents: Optional[List[Agent]] = None links: Optional[APILinks] = None diff --git a/src/gradientai/types/agent_delete_response.py 
b/src/gradientai/types/doagent_retrieve_response.py similarity index 76% rename from src/gradientai/types/agent_delete_response.py rename to src/gradientai/types/doagent_retrieve_response.py index eb1d440d..9fb0a722 100644 --- a/src/gradientai/types/agent_delete_response.py +++ b/src/gradientai/types/doagent_retrieve_response.py @@ -6,10 +6,10 @@ from .._models import BaseModel -__all__ = ["AgentDeleteResponse"] +__all__ = ["DoagentRetrieveResponse"] -class AgentDeleteResponse(BaseModel): +class DoagentRetrieveResponse(BaseModel): agent: Optional["APIAgent"] = None diff --git a/src/gradientai/types/agent_update_params.py b/src/gradientai/types/doagent_update_params.py similarity index 95% rename from src/gradientai/types/agent_update_params.py rename to src/gradientai/types/doagent_update_params.py index 85f9a9c2..a8598f5e 100644 --- a/src/gradientai/types/agent_update_params.py +++ b/src/gradientai/types/doagent_update_params.py @@ -8,10 +8,10 @@ from .._utils import PropertyInfo from .api_retrieval_method import APIRetrievalMethod -__all__ = ["AgentUpdateParams"] +__all__ = ["DoagentUpdateParams"] -class AgentUpdateParams(TypedDict, total=False): +class DoagentUpdateParams(TypedDict, total=False): anthropic_key_uuid: str description: str diff --git a/src/gradientai/types/agent_create_response.py b/src/gradientai/types/doagent_update_response.py similarity index 77% rename from src/gradientai/types/agent_create_response.py rename to src/gradientai/types/doagent_update_response.py index 48545fe9..4d48bee7 100644 --- a/src/gradientai/types/agent_create_response.py +++ b/src/gradientai/types/doagent_update_response.py @@ -6,10 +6,10 @@ from .._models import BaseModel -__all__ = ["AgentCreateResponse"] +__all__ = ["DoagentUpdateResponse"] -class AgentCreateResponse(BaseModel): +class DoagentUpdateResponse(BaseModel): agent: Optional["APIAgent"] = None diff --git a/src/gradientai/types/agent_update_status_params.py b/src/gradientai/types/doagent_update_status_params.py 
similarity index 79% rename from src/gradientai/types/agent_update_status_params.py rename to src/gradientai/types/doagent_update_status_params.py index a0cdc0b9..3bd0c539 100644 --- a/src/gradientai/types/agent_update_status_params.py +++ b/src/gradientai/types/doagent_update_status_params.py @@ -7,10 +7,10 @@ from .._utils import PropertyInfo from .api_deployment_visibility import APIDeploymentVisibility -__all__ = ["AgentUpdateStatusParams"] +__all__ = ["DoagentUpdateStatusParams"] -class AgentUpdateStatusParams(TypedDict, total=False): +class DoagentUpdateStatusParams(TypedDict, total=False): body_uuid: Annotated[str, PropertyInfo(alias="uuid")] visibility: APIDeploymentVisibility diff --git a/src/gradientai/types/doagent_update_status_response.py b/src/gradientai/types/doagent_update_status_response.py new file mode 100644 index 00000000..b31c1e99 --- /dev/null +++ b/src/gradientai/types/doagent_update_status_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["DoagentUpdateStatusResponse"] + + +class DoagentUpdateStatusResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent diff --git a/src/gradientai/types/agents/__init__.py b/src/gradientai/types/doagents/__init__.py similarity index 100% rename from src/gradientai/types/agents/__init__.py rename to src/gradientai/types/doagents/__init__.py diff --git a/src/gradientai/types/agents/api_key_create_params.py b/src/gradientai/types/doagents/api_key_create_params.py similarity index 100% rename from src/gradientai/types/agents/api_key_create_params.py rename to src/gradientai/types/doagents/api_key_create_params.py diff --git a/src/gradientai/types/agents/api_key_create_response.py b/src/gradientai/types/doagents/api_key_create_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_create_response.py rename to src/gradientai/types/doagents/api_key_create_response.py diff --git a/src/gradientai/types/agents/api_key_delete_response.py b/src/gradientai/types/doagents/api_key_delete_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_delete_response.py rename to src/gradientai/types/doagents/api_key_delete_response.py diff --git a/src/gradientai/types/agents/api_key_list_params.py b/src/gradientai/types/doagents/api_key_list_params.py similarity index 100% rename from src/gradientai/types/agents/api_key_list_params.py rename to src/gradientai/types/doagents/api_key_list_params.py diff --git a/src/gradientai/types/agents/api_key_list_response.py b/src/gradientai/types/doagents/api_key_list_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_list_response.py rename to src/gradientai/types/doagents/api_key_list_response.py diff --git a/src/gradientai/types/agents/api_key_regenerate_response.py 
b/src/gradientai/types/doagents/api_key_regenerate_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_regenerate_response.py rename to src/gradientai/types/doagents/api_key_regenerate_response.py diff --git a/src/gradientai/types/agents/api_key_update_params.py b/src/gradientai/types/doagents/api_key_update_params.py similarity index 100% rename from src/gradientai/types/agents/api_key_update_params.py rename to src/gradientai/types/doagents/api_key_update_params.py diff --git a/src/gradientai/types/agents/api_key_update_response.py b/src/gradientai/types/doagents/api_key_update_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_update_response.py rename to src/gradientai/types/doagents/api_key_update_response.py diff --git a/src/gradientai/types/agents/api_link_knowledge_base_output.py b/src/gradientai/types/doagents/api_link_knowledge_base_output.py similarity index 100% rename from src/gradientai/types/agents/api_link_knowledge_base_output.py rename to src/gradientai/types/doagents/api_link_knowledge_base_output.py diff --git a/src/gradientai/types/agents/api_links.py b/src/gradientai/types/doagents/api_links.py similarity index 100% rename from src/gradientai/types/agents/api_links.py rename to src/gradientai/types/doagents/api_links.py diff --git a/src/gradientai/types/agents/api_meta.py b/src/gradientai/types/doagents/api_meta.py similarity index 100% rename from src/gradientai/types/agents/api_meta.py rename to src/gradientai/types/doagents/api_meta.py diff --git a/src/gradientai/types/agents/child_agent_add_params.py b/src/gradientai/types/doagents/child_agent_add_params.py similarity index 100% rename from src/gradientai/types/agents/child_agent_add_params.py rename to src/gradientai/types/doagents/child_agent_add_params.py diff --git a/src/gradientai/types/agents/child_agent_add_response.py b/src/gradientai/types/doagents/child_agent_add_response.py similarity index 100% rename from 
src/gradientai/types/agents/child_agent_add_response.py rename to src/gradientai/types/doagents/child_agent_add_response.py diff --git a/src/gradientai/types/agents/child_agent_delete_response.py b/src/gradientai/types/doagents/child_agent_delete_response.py similarity index 100% rename from src/gradientai/types/agents/child_agent_delete_response.py rename to src/gradientai/types/doagents/child_agent_delete_response.py diff --git a/src/gradientai/types/agents/child_agent_update_params.py b/src/gradientai/types/doagents/child_agent_update_params.py similarity index 100% rename from src/gradientai/types/agents/child_agent_update_params.py rename to src/gradientai/types/doagents/child_agent_update_params.py diff --git a/src/gradientai/types/agents/child_agent_update_response.py b/src/gradientai/types/doagents/child_agent_update_response.py similarity index 100% rename from src/gradientai/types/agents/child_agent_update_response.py rename to src/gradientai/types/doagents/child_agent_update_response.py diff --git a/src/gradientai/types/agents/child_agent_view_response.py b/src/gradientai/types/doagents/child_agent_view_response.py similarity index 100% rename from src/gradientai/types/agents/child_agent_view_response.py rename to src/gradientai/types/doagents/child_agent_view_response.py diff --git a/src/gradientai/types/agents/function_create_params.py b/src/gradientai/types/doagents/function_create_params.py similarity index 100% rename from src/gradientai/types/agents/function_create_params.py rename to src/gradientai/types/doagents/function_create_params.py diff --git a/src/gradientai/types/agents/function_create_response.py b/src/gradientai/types/doagents/function_create_response.py similarity index 100% rename from src/gradientai/types/agents/function_create_response.py rename to src/gradientai/types/doagents/function_create_response.py diff --git a/src/gradientai/types/agents/function_delete_response.py b/src/gradientai/types/doagents/function_delete_response.py 
similarity index 100% rename from src/gradientai/types/agents/function_delete_response.py rename to src/gradientai/types/doagents/function_delete_response.py diff --git a/src/gradientai/types/agents/function_update_params.py b/src/gradientai/types/doagents/function_update_params.py similarity index 100% rename from src/gradientai/types/agents/function_update_params.py rename to src/gradientai/types/doagents/function_update_params.py diff --git a/src/gradientai/types/agents/function_update_response.py b/src/gradientai/types/doagents/function_update_response.py similarity index 100% rename from src/gradientai/types/agents/function_update_response.py rename to src/gradientai/types/doagents/function_update_response.py diff --git a/src/gradientai/types/agents/knowledge_base_detach_response.py b/src/gradientai/types/doagents/knowledge_base_detach_response.py similarity index 100% rename from src/gradientai/types/agents/knowledge_base_detach_response.py rename to src/gradientai/types/doagents/knowledge_base_detach_response.py diff --git a/src/gradientai/types/agents/version_list_params.py b/src/gradientai/types/doagents/version_list_params.py similarity index 100% rename from src/gradientai/types/agents/version_list_params.py rename to src/gradientai/types/doagents/version_list_params.py diff --git a/src/gradientai/types/agents/version_list_response.py b/src/gradientai/types/doagents/version_list_response.py similarity index 100% rename from src/gradientai/types/agents/version_list_response.py rename to src/gradientai/types/doagents/version_list_response.py diff --git a/src/gradientai/types/agents/version_update_params.py b/src/gradientai/types/doagents/version_update_params.py similarity index 100% rename from src/gradientai/types/agents/version_update_params.py rename to src/gradientai/types/doagents/version_update_params.py diff --git a/src/gradientai/types/agents/version_update_response.py b/src/gradientai/types/doagents/version_update_response.py similarity index 
100% rename from src/gradientai/types/agents/version_update_response.py rename to src/gradientai/types/doagents/version_update_response.py diff --git a/src/gradientai/types/indexing_job_list_response.py b/src/gradientai/types/indexing_job_list_response.py index 1379cc55..dc94b966 100644 --- a/src/gradientai/types/indexing_job_list_response.py +++ b/src/gradientai/types/indexing_job_list_response.py @@ -3,9 +3,9 @@ from typing import List, Optional from .._models import BaseModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks from .api_indexing_job import APIIndexingJob +from .doagents.api_meta import APIMeta +from .doagents.api_links import APILinks __all__ = ["IndexingJobListResponse"] diff --git a/src/gradientai/types/inference/api_key_list_response.py b/src/gradientai/types/inference/api_key_list_response.py index 535e2f96..9cbc4bd5 100644 --- a/src/gradientai/types/inference/api_key_list_response.py +++ b/src/gradientai/types/inference/api_key_list_response.py @@ -3,8 +3,8 @@ from typing import List, Optional from ..._models import BaseModel -from ..agents.api_meta import APIMeta -from ..agents.api_links import APILinks +from ..doagents.api_meta import APIMeta +from ..doagents.api_links import APILinks from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyListResponse"] diff --git a/src/gradientai/types/knowledge_base_list_response.py b/src/gradientai/types/knowledge_base_list_response.py index 09ca1ad3..4fa7536d 100644 --- a/src/gradientai/types/knowledge_base_list_response.py +++ b/src/gradientai/types/knowledge_base_list_response.py @@ -3,9 +3,9 @@ from typing import List, Optional from .._models import BaseModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks +from .doagents.api_meta import APIMeta from .api_knowledge_base import APIKnowledgeBase +from .doagents.api_links import APILinks __all__ = ["KnowledgeBaseListResponse"] diff --git 
a/src/gradientai/types/knowledge_bases/data_source_list_response.py b/src/gradientai/types/knowledge_bases/data_source_list_response.py index 78246ce1..d0c16c12 100644 --- a/src/gradientai/types/knowledge_bases/data_source_list_response.py +++ b/src/gradientai/types/knowledge_bases/data_source_list_response.py @@ -3,8 +3,8 @@ from typing import List, Optional from ..._models import BaseModel -from ..agents.api_meta import APIMeta -from ..agents.api_links import APILinks +from ..doagents.api_meta import APIMeta +from ..doagents.api_links import APILinks from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource __all__ = ["DataSourceListResponse"] diff --git a/src/gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py index e6f5fad5..1eb8f907 100644 --- a/src/gradientai/types/model_list_response.py +++ b/src/gradientai/types/model_list_response.py @@ -4,8 +4,8 @@ from .._models import BaseModel from .api_model import APIModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks +from .doagents.api_meta import APIMeta +from .doagents.api_links import APILinks __all__ = ["ModelListResponse"] diff --git a/src/gradientai/types/providers/anthropic/key_list_agents_response.py b/src/gradientai/types/providers/anthropic/key_list_agents_response.py index ba6ca946..174b5ea0 100644 --- a/src/gradientai/types/providers/anthropic/key_list_agents_response.py +++ b/src/gradientai/types/providers/anthropic/key_list_agents_response.py @@ -5,8 +5,8 @@ from typing import List, Optional from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks +from ...doagents.api_meta import APIMeta +from ...doagents.api_links import APILinks __all__ = ["KeyListAgentsResponse"] diff --git a/src/gradientai/types/providers/anthropic/key_list_response.py b/src/gradientai/types/providers/anthropic/key_list_response.py index d0b84e96..7699e23b 100644 --- 
a/src/gradientai/types/providers/anthropic/key_list_response.py +++ b/src/gradientai/types/providers/anthropic/key_list_response.py @@ -3,8 +3,8 @@ from typing import List, Optional from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks +from ...doagents.api_meta import APIMeta +from ...doagents.api_links import APILinks from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo __all__ = ["KeyListResponse"] diff --git a/src/gradientai/types/providers/openai/key_list_response.py b/src/gradientai/types/providers/openai/key_list_response.py index c263cba3..68a74cd1 100644 --- a/src/gradientai/types/providers/openai/key_list_response.py +++ b/src/gradientai/types/providers/openai/key_list_response.py @@ -3,8 +3,8 @@ from typing import List, Optional from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks +from ...doagents.api_meta import APIMeta +from ...doagents.api_links import APILinks from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo __all__ = ["KeyListResponse"] diff --git a/src/gradientai/types/providers/openai/key_retrieve_agents_response.py b/src/gradientai/types/providers/openai/key_retrieve_agents_response.py index f42edea6..9393fe08 100644 --- a/src/gradientai/types/providers/openai/key_retrieve_agents_response.py +++ b/src/gradientai/types/providers/openai/key_retrieve_agents_response.py @@ -5,8 +5,8 @@ from typing import List, Optional from ...._models import BaseModel -from ...agents.api_meta import APIMeta -from ...agents.api_links import APILinks +from ...doagents.api_meta import APIMeta +from ...doagents.api_links import APILinks __all__ = ["KeyRetrieveAgentsResponse"] diff --git a/tests/api_resources/agents/__init__.py b/tests/api_resources/doagents/__init__.py similarity index 100% rename from tests/api_resources/agents/__init__.py rename to tests/api_resources/doagents/__init__.py diff --git 
a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/doagents/test_api_keys.py similarity index 84% rename from tests/api_resources/agents/test_api_keys.py rename to tests/api_resources/doagents/test_api_keys.py index e8489258..dd654e83 100644 --- a/tests/api_resources/agents/test_api_keys.py +++ b/tests/api_resources/doagents/test_api_keys.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from gradientai.types.doagents import ( APIKeyListResponse, APIKeyCreateResponse, APIKeyDeleteResponse, @@ -26,7 +26,7 @@ class TestAPIKeys: @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - api_key = client.agents.api_keys.create( + api_key = client.doagents.api_keys.create( path_agent_uuid="agent_uuid", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -34,7 +34,7 @@ def test_method_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - api_key = client.agents.api_keys.create( + api_key = client.doagents.api_keys.create( path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", name="name", @@ -44,7 +44,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.agents.api_keys.with_raw_response.create( + response = client.doagents.api_keys.with_raw_response.create( path_agent_uuid="agent_uuid", ) @@ -56,7 +56,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.agents.api_keys.with_streaming_response.create( + with client.doagents.api_keys.with_streaming_response.create( path_agent_uuid="agent_uuid", ) as response: assert not 
response.is_closed @@ -71,14 +71,14 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @parametrize def test_path_params_create(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.agents.api_keys.with_raw_response.create( + client.doagents.api_keys.with_raw_response.create( path_agent_uuid="", ) @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - api_key = client.agents.api_keys.update( + api_key = client.doagents.api_keys.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) @@ -87,7 +87,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - api_key = client.agents.api_keys.update( + api_key = client.doagents.api_keys.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", @@ -99,7 +99,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.agents.api_keys.with_raw_response.update( + response = client.doagents.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) @@ -112,7 +112,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.agents.api_keys.with_streaming_response.update( + with client.doagents.api_keys.with_streaming_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) as response: @@ -128,13 +128,13 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, 
match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.agents.api_keys.with_raw_response.update( + client.doagents.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.agents.api_keys.with_raw_response.update( + client.doagents.api_keys.with_raw_response.update( path_api_key_uuid="", path_agent_uuid="agent_uuid", ) @@ -142,7 +142,7 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - api_key = client.agents.api_keys.list( + api_key = client.doagents.api_keys.list( agent_uuid="agent_uuid", ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @@ -150,7 +150,7 @@ def test_method_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - api_key = client.agents.api_keys.list( + api_key = client.doagents.api_keys.list( agent_uuid="agent_uuid", page=0, per_page=0, @@ -160,7 +160,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.agents.api_keys.with_raw_response.list( + response = client.doagents.api_keys.with_raw_response.list( agent_uuid="agent_uuid", ) @@ -172,7 +172,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.agents.api_keys.with_streaming_response.list( + with client.doagents.api_keys.with_streaming_response.list( agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -187,14 +187,14 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @parametrize def 
test_path_params_list(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.api_keys.with_raw_response.list( + client.doagents.api_keys.with_raw_response.list( agent_uuid="", ) @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - api_key = client.agents.api_keys.delete( + api_key = client.doagents.api_keys.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -203,7 +203,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.agents.api_keys.with_raw_response.delete( + response = client.doagents.api_keys.with_raw_response.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -216,7 +216,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.agents.api_keys.with_streaming_response.delete( + with client.doagents.api_keys.with_streaming_response.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) as response: @@ -232,13 +232,13 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.api_keys.with_raw_response.delete( + client.doagents.api_keys.with_raw_response.delete( api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.agents.api_keys.with_raw_response.delete( + client.doagents.api_keys.with_raw_response.delete( api_key_uuid="", agent_uuid="agent_uuid", ) @@ -246,7 +246,7 @@ def test_path_params_delete(self, client: GradientAI) -> 
None: @pytest.mark.skip() @parametrize def test_method_regenerate(self, client: GradientAI) -> None: - api_key = client.agents.api_keys.regenerate( + api_key = client.doagents.api_keys.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -255,7 +255,7 @@ def test_method_regenerate(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_regenerate(self, client: GradientAI) -> None: - response = client.agents.api_keys.with_raw_response.regenerate( + response = client.doagents.api_keys.with_raw_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -268,7 +268,7 @@ def test_raw_response_regenerate(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_regenerate(self, client: GradientAI) -> None: - with client.agents.api_keys.with_streaming_response.regenerate( + with client.doagents.api_keys.with_streaming_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) as response: @@ -284,13 +284,13 @@ def test_streaming_response_regenerate(self, client: GradientAI) -> None: @parametrize def test_path_params_regenerate(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.api_keys.with_raw_response.regenerate( + client.doagents.api_keys.with_raw_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.agents.api_keys.with_raw_response.regenerate( + client.doagents.api_keys.with_raw_response.regenerate( api_key_uuid="", agent_uuid="agent_uuid", ) @@ -302,7 +302,7 @@ class TestAsyncAPIKeys: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.agents.api_keys.create( + api_key = await async_client.doagents.api_keys.create( 
path_agent_uuid="agent_uuid", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -310,7 +310,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.agents.api_keys.create( + api_key = await async_client.doagents.api_keys.create( path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", name="name", @@ -320,7 +320,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.api_keys.with_raw_response.create( + response = await async_client.doagents.api_keys.with_raw_response.create( path_agent_uuid="agent_uuid", ) @@ -332,7 +332,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.api_keys.with_streaming_response.create( + async with async_client.doagents.api_keys.with_streaming_response.create( path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -347,14 +347,14 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.create( + await async_client.doagents.api_keys.with_raw_response.create( path_agent_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.agents.api_keys.update( + api_key = await 
async_client.doagents.api_keys.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) @@ -363,7 +363,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.agents.api_keys.update( + api_key = await async_client.doagents.api_keys.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", @@ -375,7 +375,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.api_keys.with_raw_response.update( + response = await async_client.doagents.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) @@ -388,7 +388,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.api_keys.with_streaming_response.update( + async with async_client.doagents.api_keys.with_streaming_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) as response: @@ -404,13 +404,13 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.update( + await async_client.doagents.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but 
received ''"): - await async_client.agents.api_keys.with_raw_response.update( + await async_client.doagents.api_keys.with_raw_response.update( path_api_key_uuid="", path_agent_uuid="agent_uuid", ) @@ -418,7 +418,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.agents.api_keys.list( + api_key = await async_client.doagents.api_keys.list( agent_uuid="agent_uuid", ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @@ -426,7 +426,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.agents.api_keys.list( + api_key = await async_client.doagents.api_keys.list( agent_uuid="agent_uuid", page=0, per_page=0, @@ -436,7 +436,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.api_keys.with_raw_response.list( + response = await async_client.doagents.api_keys.with_raw_response.list( agent_uuid="agent_uuid", ) @@ -448,7 +448,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.api_keys.with_streaming_response.list( + async with async_client.doagents.api_keys.with_streaming_response.list( agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -463,14 +463,14 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @parametrize async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.list( + await async_client.doagents.api_keys.with_raw_response.list( agent_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.agents.api_keys.delete( + api_key = await async_client.doagents.api_keys.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -479,7 +479,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.api_keys.with_raw_response.delete( + response = await async_client.doagents.api_keys.with_raw_response.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -492,7 +492,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.api_keys.with_streaming_response.delete( + async with async_client.doagents.api_keys.with_streaming_response.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) as response: @@ -508,13 +508,13 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.delete( + await async_client.doagents.api_keys.with_raw_response.delete( api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await 
async_client.agents.api_keys.with_raw_response.delete( + await async_client.doagents.api_keys.with_raw_response.delete( api_key_uuid="", agent_uuid="agent_uuid", ) @@ -522,7 +522,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_regenerate(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.agents.api_keys.regenerate( + api_key = await async_client.doagents.api_keys.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -531,7 +531,7 @@ async def test_method_regenerate(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.api_keys.with_raw_response.regenerate( + response = await async_client.doagents.api_keys.with_raw_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -544,7 +544,7 @@ async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.api_keys.with_streaming_response.regenerate( + async with async_client.doagents.api_keys.with_streaming_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) as response: @@ -560,13 +560,13 @@ async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI @parametrize async def test_path_params_regenerate(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.regenerate( + await async_client.doagents.api_keys.with_raw_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for 
`api_key_uuid` but received ''"): - await async_client.agents.api_keys.with_raw_response.regenerate( + await async_client.doagents.api_keys.with_raw_response.regenerate( api_key_uuid="", agent_uuid="agent_uuid", ) diff --git a/tests/api_resources/agents/test_child_agents.py b/tests/api_resources/doagents/test_child_agents.py similarity index 84% rename from tests/api_resources/agents/test_child_agents.py rename to tests/api_resources/doagents/test_child_agents.py index 14af3b93..8e0eb0a0 100644 --- a/tests/api_resources/agents/test_child_agents.py +++ b/tests/api_resources/doagents/test_child_agents.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from gradientai.types.doagents import ( ChildAgentAddResponse, ChildAgentViewResponse, ChildAgentDeleteResponse, @@ -25,7 +25,7 @@ class TestChildAgents: @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.update( + child_agent = client.doagents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -34,7 +34,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.update( + child_agent = client.doagents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -48,7 +48,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.agents.child_agents.with_raw_response.update( + response = client.doagents.child_agents.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", 
path_parent_agent_uuid="parent_agent_uuid", ) @@ -61,7 +61,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.agents.child_agents.with_streaming_response.update( + with client.doagents.child_agents.with_streaming_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -79,13 +79,13 @@ def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - client.agents.child_agents.with_raw_response.update( + client.doagents.child_agents.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - client.agents.child_agents.with_raw_response.update( + client.doagents.child_agents.with_raw_response.update( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -93,7 +93,7 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.delete( + child_agent = client.doagents.child_agents.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -102,7 +102,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.agents.child_agents.with_raw_response.delete( + response = client.doagents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -115,7 +115,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() 
@parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.agents.child_agents.with_streaming_response.delete( + with client.doagents.child_agents.with_streaming_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) as response: @@ -131,13 +131,13 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): - client.agents.child_agents.with_raw_response.delete( + client.doagents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): - client.agents.child_agents.with_raw_response.delete( + client.doagents.child_agents.with_raw_response.delete( child_agent_uuid="", parent_agent_uuid="parent_agent_uuid", ) @@ -145,7 +145,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_add(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.add( + child_agent = client.doagents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -154,7 +154,7 @@ def test_method_add(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_add_with_all_params(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.add( + child_agent = client.doagents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -167,7 +167,7 @@ def test_method_add_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_add(self, client: GradientAI) -> None: - response = 
client.agents.child_agents.with_raw_response.add( + response = client.doagents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -180,7 +180,7 @@ def test_raw_response_add(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_add(self, client: GradientAI) -> None: - with client.agents.child_agents.with_streaming_response.add( + with client.doagents.child_agents.with_streaming_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -198,13 +198,13 @@ def test_path_params_add(self, client: GradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - client.agents.child_agents.with_raw_response.add( + client.doagents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - client.agents.child_agents.with_raw_response.add( + client.doagents.child_agents.with_raw_response.add( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -212,7 +212,7 @@ def test_path_params_add(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_view(self, client: GradientAI) -> None: - child_agent = client.agents.child_agents.view( + child_agent = client.doagents.child_agents.view( "uuid", ) assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) @@ -220,7 +220,7 @@ def test_method_view(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_view(self, client: GradientAI) -> None: - response = client.agents.child_agents.with_raw_response.view( + response = client.doagents.child_agents.with_raw_response.view( "uuid", ) @@ -232,7 +232,7 @@ def test_raw_response_view(self, 
client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_view(self, client: GradientAI) -> None: - with client.agents.child_agents.with_streaming_response.view( + with client.doagents.child_agents.with_streaming_response.view( "uuid", ) as response: assert not response.is_closed @@ -247,7 +247,7 @@ def test_streaming_response_view(self, client: GradientAI) -> None: @parametrize def test_path_params_view(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.agents.child_agents.with_raw_response.view( + client.doagents.child_agents.with_raw_response.view( "", ) @@ -258,7 +258,7 @@ class TestAsyncChildAgents: @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.agents.child_agents.update( + child_agent = await async_client.doagents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -267,7 +267,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.agents.child_agents.update( + child_agent = await async_client.doagents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -281,7 +281,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.child_agents.with_raw_response.update( + response = await async_client.doagents.child_agents.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ 
-294,7 +294,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.child_agents.with_streaming_response.update( + async with async_client.doagents.child_agents.with_streaming_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -312,13 +312,13 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - await async_client.agents.child_agents.with_raw_response.update( + await async_client.doagents.child_agents.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - await async_client.agents.child_agents.with_raw_response.update( + await async_client.doagents.child_agents.with_raw_response.update( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -326,7 +326,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.agents.child_agents.delete( + child_agent = await async_client.doagents.child_agents.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -335,7 +335,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.child_agents.with_raw_response.delete( + response = await 
async_client.doagents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -348,7 +348,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.child_agents.with_streaming_response.delete( + async with async_client.doagents.child_agents.with_streaming_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) as response: @@ -364,13 +364,13 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): - await async_client.agents.child_agents.with_raw_response.delete( + await async_client.doagents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): - await async_client.agents.child_agents.with_raw_response.delete( + await async_client.doagents.child_agents.with_raw_response.delete( child_agent_uuid="", parent_agent_uuid="parent_agent_uuid", ) @@ -378,7 +378,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_add(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.agents.child_agents.add( + child_agent = await async_client.doagents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -387,7 +387,7 @@ async def test_method_add(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def 
test_method_add_with_all_params(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.agents.child_agents.add( + child_agent = await async_client.doagents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -400,7 +400,7 @@ async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.child_agents.with_raw_response.add( + response = await async_client.doagents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -413,7 +413,7 @@ async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.child_agents.with_streaming_response.add( + async with async_client.doagents.child_agents.with_streaming_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -431,13 +431,13 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - await async_client.agents.child_agents.with_raw_response.add( + await async_client.doagents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - await async_client.agents.child_agents.with_raw_response.add( + await async_client.doagents.child_agents.with_raw_response.add( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -445,7 +445,7 @@ 
async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_view(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.agents.child_agents.view( + child_agent = await async_client.doagents.child_agents.view( "uuid", ) assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) @@ -453,7 +453,7 @@ async def test_method_view(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_view(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.child_agents.with_raw_response.view( + response = await async_client.doagents.child_agents.with_raw_response.view( "uuid", ) @@ -465,7 +465,7 @@ async def test_raw_response_view(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_view(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.child_agents.with_streaming_response.view( + async with async_client.doagents.child_agents.with_streaming_response.view( "uuid", ) as response: assert not response.is_closed @@ -480,6 +480,6 @@ async def test_streaming_response_view(self, async_client: AsyncGradientAI) -> N @parametrize async def test_path_params_view(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.agents.child_agents.with_raw_response.view( + await async_client.doagents.child_agents.with_raw_response.view( "", ) diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/doagents/test_functions.py similarity index 85% rename from tests/api_resources/agents/test_functions.py rename to tests/api_resources/doagents/test_functions.py index bfb05fa6..11c76719 100644 --- a/tests/api_resources/agents/test_functions.py +++ b/tests/api_resources/doagents/test_functions.py @@ -9,7 
+9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from gradientai.types.doagents import ( FunctionCreateResponse, FunctionDeleteResponse, FunctionUpdateResponse, @@ -24,7 +24,7 @@ class TestFunctions: @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - function = client.agents.functions.create( + function = client.doagents.functions.create( path_agent_uuid="agent_uuid", ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) @@ -32,7 +32,7 @@ def test_method_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - function = client.agents.functions.create( + function = client.doagents.functions.create( path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", description="description", @@ -47,7 +47,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.agents.functions.with_raw_response.create( + response = client.doagents.functions.with_raw_response.create( path_agent_uuid="agent_uuid", ) @@ -59,7 +59,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.agents.functions.with_streaming_response.create( + with client.doagents.functions.with_streaming_response.create( path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -74,14 +74,14 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @parametrize def test_path_params_create(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - 
client.agents.functions.with_raw_response.create( + client.doagents.functions.with_raw_response.create( path_agent_uuid="", ) @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - function = client.agents.functions.update( + function = client.doagents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ -90,7 +90,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - function = client.agents.functions.update( + function = client.doagents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", @@ -107,7 +107,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.agents.functions.with_raw_response.update( + response = client.doagents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ -120,7 +120,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.agents.functions.with_streaming_response.update( + with client.doagents.functions.with_streaming_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) as response: @@ -136,13 +136,13 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.agents.functions.with_raw_response.update( + client.doagents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="", ) with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): - client.agents.functions.with_raw_response.update( + client.doagents.functions.with_raw_response.update( path_function_uuid="", path_agent_uuid="agent_uuid", ) @@ -150,7 +150,7 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - function = client.agents.functions.delete( + function = client.doagents.functions.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -159,7 +159,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.agents.functions.with_raw_response.delete( + response = client.doagents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -172,7 +172,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.agents.functions.with_streaming_response.delete( + with client.doagents.functions.with_streaming_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) as response: @@ -188,13 +188,13 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.functions.with_raw_response.delete( + client.doagents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): - client.agents.functions.with_raw_response.delete( + client.doagents.functions.with_raw_response.delete( function_uuid="", 
agent_uuid="agent_uuid", ) @@ -206,7 +206,7 @@ class TestAsyncFunctions: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - function = await async_client.agents.functions.create( + function = await async_client.doagents.functions.create( path_agent_uuid="agent_uuid", ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) @@ -214,7 +214,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - function = await async_client.agents.functions.create( + function = await async_client.doagents.functions.create( path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", description="description", @@ -229,7 +229,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.functions.with_raw_response.create( + response = await async_client.doagents.functions.with_raw_response.create( path_agent_uuid="agent_uuid", ) @@ -241,7 +241,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.functions.with_streaming_response.create( + async with async_client.doagents.functions.with_streaming_response.create( path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -256,14 +256,14 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await 
async_client.agents.functions.with_raw_response.create( + await async_client.doagents.functions.with_raw_response.create( path_agent_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - function = await async_client.agents.functions.update( + function = await async_client.doagents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ -272,7 +272,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - function = await async_client.agents.functions.update( + function = await async_client.doagents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", @@ -289,7 +289,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.functions.with_raw_response.update( + response = await async_client.doagents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ -302,7 +302,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.functions.with_streaming_response.update( + async with async_client.doagents.functions.with_streaming_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) as response: @@ -318,13 +318,13 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected 
a non-empty value for `path_agent_uuid` but received ''"): - await async_client.agents.functions.with_raw_response.update( + await async_client.doagents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): - await async_client.agents.functions.with_raw_response.update( + await async_client.doagents.functions.with_raw_response.update( path_function_uuid="", path_agent_uuid="agent_uuid", ) @@ -332,7 +332,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - function = await async_client.agents.functions.delete( + function = await async_client.doagents.functions.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -341,7 +341,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.functions.with_raw_response.delete( + response = await async_client.doagents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -354,7 +354,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.functions.with_streaming_response.delete( + async with async_client.doagents.functions.with_streaming_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) as response: @@ -370,13 +370,13 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.functions.with_raw_response.delete( + await async_client.doagents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): - await async_client.agents.functions.with_raw_response.delete( + await async_client.doagents.functions.with_raw_response.delete( function_uuid="", agent_uuid="agent_uuid", ) diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/doagents/test_knowledge_bases.py similarity index 82% rename from tests/api_resources/agents/test_knowledge_bases.py rename to tests/api_resources/doagents/test_knowledge_bases.py index dff80a9a..f077caaa 100644 --- a/tests/api_resources/agents/test_knowledge_bases.py +++ b/tests/api_resources/doagents/test_knowledge_bases.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse +from gradientai.types.doagents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -20,7 +20,7 @@ class TestKnowledgeBases: @pytest.mark.skip() @parametrize def test_method_attach(self, client: GradientAI) -> None: - knowledge_base = client.agents.knowledge_bases.attach( + knowledge_base = client.doagents.knowledge_bases.attach( "agent_uuid", ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) @@ -28,7 +28,7 @@ def test_method_attach(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_attach(self, client: GradientAI) -> None: - response = client.agents.knowledge_bases.with_raw_response.attach( + response = 
client.doagents.knowledge_bases.with_raw_response.attach( "agent_uuid", ) @@ -40,7 +40,7 @@ def test_raw_response_attach(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_attach(self, client: GradientAI) -> None: - with client.agents.knowledge_bases.with_streaming_response.attach( + with client.doagents.knowledge_bases.with_streaming_response.attach( "agent_uuid", ) as response: assert not response.is_closed @@ -55,14 +55,14 @@ def test_streaming_response_attach(self, client: GradientAI) -> None: @parametrize def test_path_params_attach(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.knowledge_bases.with_raw_response.attach( + client.doagents.knowledge_bases.with_raw_response.attach( "", ) @pytest.mark.skip() @parametrize def test_method_attach_single(self, client: GradientAI) -> None: - knowledge_base = client.agents.knowledge_bases.attach_single( + knowledge_base = client.doagents.knowledge_bases.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -71,7 +71,7 @@ def test_method_attach_single(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_attach_single(self, client: GradientAI) -> None: - response = client.agents.knowledge_bases.with_raw_response.attach_single( + response = client.doagents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -84,7 +84,7 @@ def test_raw_response_attach_single(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_attach_single(self, client: GradientAI) -> None: - with client.agents.knowledge_bases.with_streaming_response.attach_single( + with client.doagents.knowledge_bases.with_streaming_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ 
-100,13 +100,13 @@ def test_streaming_response_attach_single(self, client: GradientAI) -> None: @parametrize def test_path_params_attach_single(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.knowledge_bases.with_raw_response.attach_single( + client.doagents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - client.agents.knowledge_bases.with_raw_response.attach_single( + client.doagents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="", agent_uuid="agent_uuid", ) @@ -114,7 +114,7 @@ def test_path_params_attach_single(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_detach(self, client: GradientAI) -> None: - knowledge_base = client.agents.knowledge_bases.detach( + knowledge_base = client.doagents.knowledge_bases.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -123,7 +123,7 @@ def test_method_detach(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_detach(self, client: GradientAI) -> None: - response = client.agents.knowledge_bases.with_raw_response.detach( + response = client.doagents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -136,7 +136,7 @@ def test_raw_response_detach(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_detach(self, client: GradientAI) -> None: - with client.agents.knowledge_bases.with_streaming_response.detach( + with client.doagents.knowledge_bases.with_streaming_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -152,13 +152,13 @@ def test_streaming_response_detach(self, client: 
GradientAI) -> None: @parametrize def test_path_params_detach(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.agents.knowledge_bases.with_raw_response.detach( + client.doagents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - client.agents.knowledge_bases.with_raw_response.detach( + client.doagents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="", agent_uuid="agent_uuid", ) @@ -170,7 +170,7 @@ class TestAsyncKnowledgeBases: @pytest.mark.skip() @parametrize async def test_method_attach(self, async_client: AsyncGradientAI) -> None: - knowledge_base = await async_client.agents.knowledge_bases.attach( + knowledge_base = await async_client.doagents.knowledge_bases.attach( "agent_uuid", ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) @@ -178,7 +178,7 @@ async def test_method_attach(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_attach(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.knowledge_bases.with_raw_response.attach( + response = await async_client.doagents.knowledge_bases.with_raw_response.attach( "agent_uuid", ) @@ -190,7 +190,7 @@ async def test_raw_response_attach(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_attach(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.knowledge_bases.with_streaming_response.attach( + async with async_client.doagents.knowledge_bases.with_streaming_response.attach( "agent_uuid", ) as response: assert not response.is_closed @@ -205,14 +205,14 @@ async def test_streaming_response_attach(self, async_client: AsyncGradientAI) -> 
@parametrize async def test_path_params_attach(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.knowledge_bases.with_raw_response.attach( + await async_client.doagents.knowledge_bases.with_raw_response.attach( "", ) @pytest.mark.skip() @parametrize async def test_method_attach_single(self, async_client: AsyncGradientAI) -> None: - knowledge_base = await async_client.agents.knowledge_bases.attach_single( + knowledge_base = await async_client.doagents.knowledge_bases.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -221,7 +221,7 @@ async def test_method_attach_single(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.knowledge_bases.with_raw_response.attach_single( + response = await async_client.doagents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -234,7 +234,7 @@ async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize async def test_streaming_response_attach_single(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.knowledge_bases.with_streaming_response.attach_single( + async with async_client.doagents.knowledge_bases.with_streaming_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -250,13 +250,13 @@ async def test_streaming_response_attach_single(self, async_client: AsyncGradien @parametrize async def test_path_params_attach_single(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await 
async_client.agents.knowledge_bases.with_raw_response.attach_single( + await async_client.doagents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - await async_client.agents.knowledge_bases.with_raw_response.attach_single( + await async_client.doagents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="", agent_uuid="agent_uuid", ) @@ -264,7 +264,7 @@ async def test_path_params_attach_single(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_method_detach(self, async_client: AsyncGradientAI) -> None: - knowledge_base = await async_client.agents.knowledge_bases.detach( + knowledge_base = await async_client.doagents.knowledge_bases.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -273,7 +273,7 @@ async def test_method_detach(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.knowledge_bases.with_raw_response.detach( + response = await async_client.doagents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -286,7 +286,7 @@ async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_detach(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.knowledge_bases.with_streaming_response.detach( + async with async_client.doagents.knowledge_bases.with_streaming_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -302,13 +302,13 @@ async def test_streaming_response_detach(self, async_client: AsyncGradientAI) -> @parametrize async def 
test_path_params_detach(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.agents.knowledge_bases.with_raw_response.detach( + await async_client.doagents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - await async_client.agents.knowledge_bases.with_raw_response.detach( + await async_client.doagents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="", agent_uuid="agent_uuid", ) diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/doagents/test_versions.py similarity index 84% rename from tests/api_resources/agents/test_versions.py rename to tests/api_resources/doagents/test_versions.py index 77fee4c6..ec5e293d 100644 --- a/tests/api_resources/agents/test_versions.py +++ b/tests/api_resources/doagents/test_versions.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from gradientai.types.doagents import ( VersionListResponse, VersionUpdateResponse, ) @@ -23,7 +23,7 @@ class TestVersions: @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - version = client.agents.versions.update( + version = client.doagents.versions.update( path_uuid="uuid", ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -31,7 +31,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - version = client.agents.versions.update( + version = client.doagents.versions.update( path_uuid="uuid", body_uuid="uuid", version_hash="version_hash", @@ -41,7 +41,7 @@ def test_method_update_with_all_params(self, 
client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.agents.versions.with_raw_response.update( + response = client.doagents.versions.with_raw_response.update( path_uuid="uuid", ) @@ -53,7 +53,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.agents.versions.with_streaming_response.update( + with client.doagents.versions.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed @@ -68,14 +68,14 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.agents.versions.with_raw_response.update( + client.doagents.versions.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - version = client.agents.versions.list( + version = client.doagents.versions.list( uuid="uuid", ) assert_matches_type(VersionListResponse, version, path=["response"]) @@ -83,7 +83,7 @@ def test_method_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - version = client.agents.versions.list( + version = client.doagents.versions.list( uuid="uuid", page=0, per_page=0, @@ -93,7 +93,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.agents.versions.with_raw_response.list( + response = client.doagents.versions.with_raw_response.list( uuid="uuid", ) @@ -105,7 +105,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: 
@pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.agents.versions.with_streaming_response.list( + with client.doagents.versions.with_streaming_response.list( uuid="uuid", ) as response: assert not response.is_closed @@ -120,7 +120,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @parametrize def test_path_params_list(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.agents.versions.with_raw_response.list( + client.doagents.versions.with_raw_response.list( uuid="", ) @@ -131,7 +131,7 @@ class TestAsyncVersions: @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - version = await async_client.agents.versions.update( + version = await async_client.doagents.versions.update( path_uuid="uuid", ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -139,7 +139,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - version = await async_client.agents.versions.update( + version = await async_client.doagents.versions.update( path_uuid="uuid", body_uuid="uuid", version_hash="version_hash", @@ -149,7 +149,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.versions.with_raw_response.update( + response = await async_client.doagents.versions.with_raw_response.update( path_uuid="uuid", ) @@ -161,7 +161,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) 
-> None: - async with async_client.agents.versions.with_streaming_response.update( + async with async_client.doagents.versions.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed @@ -176,14 +176,14 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.agents.versions.with_raw_response.update( + await async_client.doagents.versions.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - version = await async_client.agents.versions.list( + version = await async_client.doagents.versions.list( uuid="uuid", ) assert_matches_type(VersionListResponse, version, path=["response"]) @@ -191,7 +191,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - version = await async_client.agents.versions.list( + version = await async_client.doagents.versions.list( uuid="uuid", page=0, per_page=0, @@ -201,7 +201,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.versions.with_raw_response.list( + response = await async_client.doagents.versions.with_raw_response.list( uuid="uuid", ) @@ -213,7 +213,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.versions.with_streaming_response.list( + async with 
async_client.doagents.versions.with_streaming_response.list( uuid="uuid", ) as response: assert not response.is_closed @@ -228,6 +228,6 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @parametrize async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.agents.versions.with_raw_response.list( + await async_client.doagents.versions.with_raw_response.list( uuid="", ) diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_doagents.py similarity index 66% rename from tests/api_resources/test_agents.py rename to tests/api_resources/test_doagents.py index f39ac4d5..9a8c5c91 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_doagents.py @@ -10,30 +10,30 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type from gradientai.types import ( - AgentListResponse, - AgentCreateResponse, - AgentDeleteResponse, - AgentUpdateResponse, - AgentRetrieveResponse, - AgentUpdateStatusResponse, + DoagentListResponse, + DoagentCreateResponse, + DoagentDeleteResponse, + DoagentUpdateResponse, + DoagentRetrieveResponse, + DoagentUpdateStatusResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") -class TestAgents: +class TestDoagents: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - agent = client.agents.create() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) + doagent = client.doagents.create() + assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - agent = client.agents.create( + doagent = 
client.doagents.create( anthropic_key_uuid="anthropic_key_uuid", description="description", instruction="instruction", @@ -45,61 +45,61 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: region="region", tags=["string"], ) - assert_matches_type(AgentCreateResponse, agent, path=["response"]) + assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.agents.with_raw_response.create() + response = client.doagents.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.agents.with_streaming_response.create() as response: + with client.doagents.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize def test_method_retrieve(self, client: GradientAI) -> None: - agent = client.agents.retrieve( + doagent = client.doagents.retrieve( "uuid", ) - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.agents.with_raw_response.retrieve( + response = 
client.doagents.with_raw_response.retrieve( "uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.agents.with_streaming_response.retrieve( + with client.doagents.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -107,22 +107,22 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @parametrize def test_path_params_retrieve(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.agents.with_raw_response.retrieve( + client.doagents.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - agent = client.agents.update( + doagent = client.doagents.update( path_uuid="uuid", ) - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - agent = client.agents.update( + doagent = client.doagents.update( path_uuid="uuid", anthropic_key_uuid="anthropic_key_uuid", description="description", @@ -140,31 +140,31 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: 
top_p=0, body_uuid="uuid", ) - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.agents.with_raw_response.update( + response = client.doagents.with_raw_response.update( path_uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.agents.with_streaming_response.update( + with client.doagents.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -172,79 +172,79 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.agents.with_raw_response.update( + client.doagents.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - agent = client.agents.list() - assert_matches_type(AgentListResponse, agent, path=["response"]) + doagent = client.doagents.list() + assert_matches_type(DoagentListResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def 
test_method_list_with_all_params(self, client: GradientAI) -> None: - agent = client.agents.list( + doagent = client.doagents.list( only_deployed=True, page=0, per_page=0, ) - assert_matches_type(AgentListResponse, agent, path=["response"]) + assert_matches_type(DoagentListResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.agents.with_raw_response.list() + response = client.doagents.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentListResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentListResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.agents.with_streaming_response.list() as response: + with client.doagents.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentListResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentListResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - agent = client.agents.delete( + doagent = client.doagents.delete( "uuid", ) - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.agents.with_raw_response.delete( + response = client.doagents.with_raw_response.delete( "uuid", ) assert response.is_closed is True assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.agents.with_streaming_response.delete( + with client.doagents.with_streaming_response.delete( "uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -252,51 +252,51 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.agents.with_raw_response.delete( + client.doagents.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize def test_method_update_status(self, client: GradientAI) -> None: - agent = client.agents.update_status( + doagent = client.doagents.update_status( path_uuid="uuid", ) - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_method_update_status_with_all_params(self, client: GradientAI) -> None: - agent = client.agents.update_status( + doagent = client.doagents.update_status( path_uuid="uuid", body_uuid="uuid", visibility="VISIBILITY_UNKNOWN", ) - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) @pytest.mark.skip() 
@parametrize def test_raw_response_update_status(self, client: GradientAI) -> None: - response = client.agents.with_raw_response.update_status( + response = client.doagents.with_raw_response.update_status( path_uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_update_status(self, client: GradientAI) -> None: - with client.agents.with_streaming_response.update_status( + with client.doagents.with_streaming_response.update_status( path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = response.parse() - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + doagent = response.parse() + assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -304,24 +304,24 @@ def test_streaming_response_update_status(self, client: GradientAI) -> None: @parametrize def test_path_params_update_status(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.agents.with_raw_response.update_status( + client.doagents.with_raw_response.update_status( path_uuid="", ) -class TestAsyncAgents: +class TestAsyncDoagents: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.create() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) + doagent = await async_client.doagents.create() + 
assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.create( + doagent = await async_client.doagents.create( anthropic_key_uuid="anthropic_key_uuid", description="description", instruction="instruction", @@ -333,61 +333,61 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI region="region", tags=["string"], ) - assert_matches_type(AgentCreateResponse, agent, path=["response"]) + assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.with_raw_response.create() + response = await async_client.doagents.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.with_streaming_response.create() as response: + async with async_client.doagents.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentCreateResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - agent = await 
async_client.agents.retrieve( + doagent = await async_client.doagents.retrieve( "uuid", ) - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.with_raw_response.retrieve( + response = await async_client.doagents.with_raw_response.retrieve( "uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.with_streaming_response.retrieve( + async with async_client.doagents.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -395,22 +395,22 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.agents.with_raw_response.retrieve( + await async_client.doagents.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - agent = await 
async_client.agents.update( + doagent = await async_client.doagents.update( path_uuid="uuid", ) - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.update( + doagent = await async_client.doagents.update( path_uuid="uuid", anthropic_key_uuid="anthropic_key_uuid", description="description", @@ -428,31 +428,31 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI top_p=0, body_uuid="uuid", ) - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.with_raw_response.update( + response = await async_client.doagents.with_raw_response.update( path_uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.with_streaming_response.update( + async with async_client.doagents.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentUpdateResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) 
assert cast(Any, response.is_closed) is True @@ -460,79 +460,79 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.agents.with_raw_response.update( + await async_client.doagents.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.list() - assert_matches_type(AgentListResponse, agent, path=["response"]) + doagent = await async_client.doagents.list() + assert_matches_type(DoagentListResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.list( + doagent = await async_client.doagents.list( only_deployed=True, page=0, per_page=0, ) - assert_matches_type(AgentListResponse, agent, path=["response"]) + assert_matches_type(DoagentListResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.with_raw_response.list() + response = await async_client.doagents.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentListResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentListResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.with_streaming_response.list() as response: + async with 
async_client.doagents.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentListResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentListResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.delete( + doagent = await async_client.doagents.delete( "uuid", ) - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.with_raw_response.delete( + response = await async_client.doagents.with_raw_response.delete( "uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.with_streaming_response.delete( + async with async_client.doagents.with_streaming_response.delete( "uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentDeleteResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -540,51 +540,51 @@ 
async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.agents.with_raw_response.delete( + await async_client.doagents.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize async def test_method_update_status(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.update_status( + doagent = await async_client.doagents.update_status( path_uuid="uuid", ) - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_update_status_with_all_params(self, async_client: AsyncGradientAI) -> None: - agent = await async_client.agents.update_status( + doagent = await async_client.doagents.update_status( path_uuid="uuid", body_uuid="uuid", visibility="VISIBILITY_UNKNOWN", ) - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_update_status(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.with_raw_response.update_status( + response = await async_client.doagents.with_raw_response.update_status( path_uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_update_status(self, async_client: AsyncGradientAI) -> None: - async with 
async_client.agents.with_streaming_response.update_status( + async with async_client.doagents.with_streaming_response.update_status( path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - agent = await response.parse() - assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) + doagent = await response.parse() + assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -592,6 +592,6 @@ async def test_streaming_response_update_status(self, async_client: AsyncGradien @parametrize async def test_path_params_update_status(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.agents.with_raw_response.update_status( + await async_client.doagents.with_raw_response.update_status( path_uuid="", ) diff --git a/tests/test_client.py b/tests/test_client.py index d83082e3..4a26cbd0 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -724,7 +724,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - client.agents.versions.with_streaming_response.list(uuid="uuid").__enter__() + client.doagents.versions.with_streaming_response.list(uuid="uuid").__enter__() assert _get_open_connections(self.client) == 0 @@ -734,7 +734,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - client.agents.versions.with_streaming_response.list(uuid="uuid").__enter__() + client.doagents.versions.with_streaming_response.list(uuid="uuid").__enter__() assert 
_get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @@ -763,7 +763,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = client.agents.versions.with_raw_response.list(uuid="uuid") + response = client.doagents.versions.with_raw_response.list(uuid="uuid") assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @@ -787,7 +787,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = client.agents.versions.with_raw_response.list( + response = client.doagents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": Omit()} ) @@ -812,7 +812,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = client.agents.versions.with_raw_response.list( + response = client.doagents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": "42"} ) @@ -1544,7 +1544,7 @@ async def test_retrying_timeout_errors_doesnt_leak( respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - await async_client.agents.versions.with_streaming_response.list(uuid="uuid").__aenter__() + await async_client.doagents.versions.with_streaming_response.list(uuid="uuid").__aenter__() assert _get_open_connections(self.client) == 0 @@ -1556,7 +1556,7 @@ async def test_retrying_status_errors_doesnt_leak( respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - await 
async_client.agents.versions.with_streaming_response.list(uuid="uuid").__aenter__() + await async_client.doagents.versions.with_streaming_response.list(uuid="uuid").__aenter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @@ -1586,7 +1586,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = await client.agents.versions.with_raw_response.list(uuid="uuid") + response = await client.doagents.versions.with_raw_response.list(uuid="uuid") assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @@ -1611,7 +1611,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = await client.agents.versions.with_raw_response.list( + response = await client.doagents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": Omit()} ) @@ -1637,7 +1637,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = await client.agents.versions.with_raw_response.list( + response = await client.doagents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": "42"} ) From f27643e1e00f606029be919a7117801facc6e5b7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 20 Jun 2025 14:17:51 +0000 Subject: [PATCH 27/41] feat(api): update via SDK Studio --- .stats.yml | 2 +- README.md | 16 +- api.md | 70 ++--- src/gradientai/_client.py | 40 +-- src/gradientai/resources/__init__.py | 28 +- .../{doagents => agents}/__init__.py | 28 +- .../doagents.py => agents/agents.py} | 218 +++++++-------- 
.../{doagents => agents}/api_keys.py | 12 +- .../{doagents => agents}/child_agents.py | 10 +- .../{doagents => agents}/functions.py | 8 +- .../{doagents => agents}/knowledge_bases.py | 4 +- .../{doagents => agents}/versions.py | 6 +- src/gradientai/types/__init__.py | 20 +- ...reate_params.py => agent_create_params.py} | 4 +- ...e_response.py => agent_create_response.py} | 4 +- ...e_response.py => agent_delete_response.py} | 4 +- ...nt_list_params.py => agent_list_params.py} | 4 +- ...ist_response.py => agent_list_response.py} | 8 +- ...response.py => agent_retrieve_response.py} | 4 +- ...pdate_params.py => agent_update_params.py} | 4 +- ...e_response.py => agent_update_response.py} | 4 +- ...arams.py => agent_update_status_params.py} | 4 +- .../types/agent_update_status_response.py | 16 ++ .../types/{doagents => agents}/__init__.py | 0 .../api_key_create_params.py | 0 .../api_key_create_response.py | 0 .../api_key_delete_response.py | 0 .../api_key_list_params.py | 0 .../api_key_list_response.py | 0 .../api_key_regenerate_response.py | 0 .../api_key_update_params.py | 0 .../api_key_update_response.py | 0 .../api_link_knowledge_base_output.py | 0 .../types/{doagents => agents}/api_links.py | 0 .../types/{doagents => agents}/api_meta.py | 0 .../child_agent_add_params.py | 0 .../child_agent_add_response.py | 0 .../child_agent_delete_response.py | 0 .../child_agent_update_params.py | 0 .../child_agent_update_response.py | 0 .../child_agent_view_response.py | 0 .../function_create_params.py | 0 .../function_create_response.py | 0 .../function_delete_response.py | 0 .../function_update_params.py | 0 .../function_update_response.py | 0 .../knowledge_base_detach_response.py | 0 .../version_list_params.py | 0 .../version_list_response.py | 0 .../version_update_params.py | 0 .../version_update_response.py | 0 .../types/doagent_update_status_response.py | 16 -- .../types/indexing_job_list_response.py | 4 +- .../types/inference/api_key_list_response.py | 4 +- 
.../types/knowledge_base_list_response.py | 4 +- .../data_source_list_response.py | 4 +- src/gradientai/types/model_list_response.py | 4 +- .../anthropic/key_list_agents_response.py | 4 +- .../providers/anthropic/key_list_response.py | 4 +- .../providers/openai/key_list_response.py | 4 +- .../openai/key_retrieve_agents_response.py | 4 +- .../{doagents => agents}/__init__.py | 0 .../{doagents => agents}/test_api_keys.py | 106 ++++---- .../{doagents => agents}/test_child_agents.py | 86 +++--- .../{doagents => agents}/test_functions.py | 66 ++--- .../test_knowledge_bases.py | 58 ++-- .../{doagents => agents}/test_versions.py | 42 +-- .../{test_doagents.py => test_agents.py} | 256 +++++++++--------- tests/test_client.py | 20 +- 69 files changed, 602 insertions(+), 602 deletions(-) rename src/gradientai/resources/{doagents => agents}/__init__.py (84%) rename src/gradientai/resources/{doagents/doagents.py => agents/agents.py} (87%) rename src/gradientai/resources/{doagents => agents}/api_keys.py (98%) rename src/gradientai/resources/{doagents => agents}/child_agents.py (98%) rename src/gradientai/resources/{doagents => agents}/functions.py (98%) rename src/gradientai/resources/{doagents => agents}/knowledge_bases.py (98%) rename src/gradientai/resources/{doagents => agents}/versions.py (98%) rename src/gradientai/types/{doagent_create_params.py => agent_create_params.py} (90%) rename src/gradientai/types/{doagent_update_response.py => agent_create_response.py} (77%) rename src/gradientai/types/{doagent_create_response.py => agent_delete_response.py} (77%) rename src/gradientai/types/{doagent_list_params.py => agent_list_params.py} (79%) rename src/gradientai/types/{doagent_list_response.py => agent_list_response.py} (98%) rename src/gradientai/types/{doagent_delete_response.py => agent_retrieve_response.py} (77%) rename src/gradientai/types/{doagent_update_params.py => agent_update_params.py} (95%) rename src/gradientai/types/{doagent_retrieve_response.py => 
agent_update_response.py} (76%) rename src/gradientai/types/{doagent_update_status_params.py => agent_update_status_params.py} (79%) create mode 100644 src/gradientai/types/agent_update_status_response.py rename src/gradientai/types/{doagents => agents}/__init__.py (100%) rename src/gradientai/types/{doagents => agents}/api_key_create_params.py (100%) rename src/gradientai/types/{doagents => agents}/api_key_create_response.py (100%) rename src/gradientai/types/{doagents => agents}/api_key_delete_response.py (100%) rename src/gradientai/types/{doagents => agents}/api_key_list_params.py (100%) rename src/gradientai/types/{doagents => agents}/api_key_list_response.py (100%) rename src/gradientai/types/{doagents => agents}/api_key_regenerate_response.py (100%) rename src/gradientai/types/{doagents => agents}/api_key_update_params.py (100%) rename src/gradientai/types/{doagents => agents}/api_key_update_response.py (100%) rename src/gradientai/types/{doagents => agents}/api_link_knowledge_base_output.py (100%) rename src/gradientai/types/{doagents => agents}/api_links.py (100%) rename src/gradientai/types/{doagents => agents}/api_meta.py (100%) rename src/gradientai/types/{doagents => agents}/child_agent_add_params.py (100%) rename src/gradientai/types/{doagents => agents}/child_agent_add_response.py (100%) rename src/gradientai/types/{doagents => agents}/child_agent_delete_response.py (100%) rename src/gradientai/types/{doagents => agents}/child_agent_update_params.py (100%) rename src/gradientai/types/{doagents => agents}/child_agent_update_response.py (100%) rename src/gradientai/types/{doagents => agents}/child_agent_view_response.py (100%) rename src/gradientai/types/{doagents => agents}/function_create_params.py (100%) rename src/gradientai/types/{doagents => agents}/function_create_response.py (100%) rename src/gradientai/types/{doagents => agents}/function_delete_response.py (100%) rename src/gradientai/types/{doagents => agents}/function_update_params.py (100%) 
rename src/gradientai/types/{doagents => agents}/function_update_response.py (100%) rename src/gradientai/types/{doagents => agents}/knowledge_base_detach_response.py (100%) rename src/gradientai/types/{doagents => agents}/version_list_params.py (100%) rename src/gradientai/types/{doagents => agents}/version_list_response.py (100%) rename src/gradientai/types/{doagents => agents}/version_update_params.py (100%) rename src/gradientai/types/{doagents => agents}/version_update_response.py (100%) delete mode 100644 src/gradientai/types/doagent_update_status_response.py rename tests/api_resources/{doagents => agents}/__init__.py (100%) rename tests/api_resources/{doagents => agents}/test_api_keys.py (84%) rename tests/api_resources/{doagents => agents}/test_child_agents.py (84%) rename tests/api_resources/{doagents => agents}/test_functions.py (85%) rename tests/api_resources/{doagents => agents}/test_knowledge_bases.py (82%) rename tests/api_resources/{doagents => agents}/test_versions.py (84%) rename tests/api_resources/{test_doagents.py => test_agents.py} (66%) diff --git a/.stats.yml b/.stats.yml index 0e1ae316..8f85d58c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: c424a9395cc2b0dbf298813e54562194 +config_hash: a2c4c23eb1e8655fbfb2b6930ce0fd46 diff --git a/README.md b/README.md index d047f658..36edcfbd 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ client = GradientAI( ), # This is the default and can be omitted ) -versions = client.doagents.versions.list( +versions = client.agents.versions.list( uuid="REPLACE_ME", ) print(versions.agent_versions) @@ -61,7 +61,7 @@ client = AsyncGradientAI( async def main() -> None: - versions = await client.doagents.versions.list( + 
versions = await client.agents.versions.list( uuid="REPLACE_ME", ) print(versions.agent_versions) @@ -113,7 +113,7 @@ from gradientai import GradientAI client = GradientAI() try: - client.doagents.versions.list( + client.agents.versions.list( uuid="REPLACE_ME", ) except gradientai.APIConnectionError as e: @@ -158,7 +158,7 @@ client = GradientAI( ) # Or, configure per-request: -client.with_options(max_retries=5).doagents.versions.list( +client.with_options(max_retries=5).agents.versions.list( uuid="REPLACE_ME", ) ``` @@ -183,7 +183,7 @@ client = GradientAI( ) # Override per-request: -client.with_options(timeout=5.0).doagents.versions.list( +client.with_options(timeout=5.0).agents.versions.list( uuid="REPLACE_ME", ) ``` @@ -226,12 +226,12 @@ The "raw" Response object can be accessed by prefixing `.with_raw_response.` to from gradientai import GradientAI client = GradientAI() -response = client.doagents.versions.with_raw_response.list( +response = client.agents.versions.with_raw_response.list( uuid="REPLACE_ME", ) print(response.headers.get('X-My-Header')) -version = response.parse() # get the object that `doagents.versions.list()` would have returned +version = response.parse() # get the object that `agents.versions.list()` would have returned print(version.agent_versions) ``` @@ -246,7 +246,7 @@ The above interface eagerly reads the full response body when you make the reque To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()` or `.parse()`. In the async client, these are async methods. 
```python -with client.doagents.versions.with_streaming_response.list( +with client.agents.versions.with_streaming_response.list( uuid="REPLACE_ME", ) as response: print(response.headers.get("X-My-Header")) diff --git a/api.md b/api.md index 0bc41bbe..2376a11f 100644 --- a/api.md +++ b/api.md @@ -1,4 +1,4 @@ -# Doagents +# Agents Types: @@ -10,30 +10,30 @@ from gradientai.types import ( APIDeploymentVisibility, APIOpenAIAPIKeyInfo, APIRetrievalMethod, - DoagentCreateResponse, - DoagentRetrieveResponse, - DoagentUpdateResponse, - DoagentListResponse, - DoagentDeleteResponse, - DoagentUpdateStatusResponse, + AgentCreateResponse, + AgentRetrieveResponse, + AgentUpdateResponse, + AgentListResponse, + AgentDeleteResponse, + AgentUpdateStatusResponse, ) ``` Methods: -- client.doagents.create(\*\*params) -> DoagentCreateResponse -- client.doagents.retrieve(uuid) -> DoagentRetrieveResponse -- client.doagents.update(path_uuid, \*\*params) -> DoagentUpdateResponse -- client.doagents.list(\*\*params) -> DoagentListResponse -- client.doagents.delete(uuid) -> DoagentDeleteResponse -- client.doagents.update_status(path_uuid, \*\*params) -> DoagentUpdateStatusResponse +- client.agents.create(\*\*params) -> AgentCreateResponse +- client.agents.retrieve(uuid) -> AgentRetrieveResponse +- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse +- client.agents.list(\*\*params) -> AgentListResponse +- client.agents.delete(uuid) -> AgentDeleteResponse +- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse ## APIKeys Types: ```python -from gradientai.types.doagents import ( +from gradientai.types.agents import ( APIKeyCreateResponse, APIKeyUpdateResponse, APIKeyListResponse, @@ -44,18 +44,18 @@ from gradientai.types.doagents import ( Methods: -- client.doagents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse -- client.doagents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse -- 
client.doagents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse -- client.doagents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse -- client.doagents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse +- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse +- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse +- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse +- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse +- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse ## Functions Types: ```python -from gradientai.types.doagents import ( +from gradientai.types.agents import ( FunctionCreateResponse, FunctionUpdateResponse, FunctionDeleteResponse, @@ -64,43 +64,43 @@ from gradientai.types.doagents import ( Methods: -- client.doagents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse -- client.doagents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse -- client.doagents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse +- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse +- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse +- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse ## Versions Types: ```python -from gradientai.types.doagents import APILinks, APIMeta, VersionUpdateResponse, VersionListResponse +from gradientai.types.agents import APILinks, APIMeta, VersionUpdateResponse, VersionListResponse ``` Methods: -- client.doagents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse -- client.doagents.versions.list(uuid, \*\*params) -> VersionListResponse +- 
client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse +- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse ## KnowledgeBases Types: ```python -from gradientai.types.doagents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse +from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse ``` Methods: -- client.doagents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput -- client.doagents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput -- client.doagents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse +- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput +- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput +- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse ## ChildAgents Types: ```python -from gradientai.types.doagents import ( +from gradientai.types.agents import ( ChildAgentUpdateResponse, ChildAgentDeleteResponse, ChildAgentAddResponse, @@ -110,10 +110,10 @@ from gradientai.types.doagents import ( Methods: -- client.doagents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse -- client.doagents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse -- client.doagents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse -- client.doagents.child_agents.view(uuid) -> ChildAgentViewResponse +- client.agents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse +- client.agents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse +- client.agents.child_agents.add(path_child_agent_uuid, \*, 
path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse +- client.agents.child_agents.view(uuid) -> ChildAgentViewResponse # Providers diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 992559a2..0a5eb9a1 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -31,12 +31,12 @@ ) if TYPE_CHECKING: - from .resources import chat, models, regions, doagents, inference, providers, indexing_jobs, knowledge_bases + from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.chat.chat import ChatResource, AsyncChatResource + from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource - from .resources.doagents.doagents import DoagentsResource, AsyncDoagentsResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource from .resources.providers.providers import ProvidersResource, AsyncProvidersResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource @@ -110,10 +110,10 @@ def __init__( ) @cached_property - def doagents(self) -> DoagentsResource: - from .resources.doagents import DoagentsResource + def agents(self) -> AgentsResource: + from .resources.agents import AgentsResource - return DoagentsResource(self) + return AgentsResource(self) @cached_property def providers(self) -> ProvidersResource: @@ -329,10 +329,10 @@ def __init__( ) @cached_property - def doagents(self) -> AsyncDoagentsResource: - from .resources.doagents import AsyncDoagentsResource + def agents(self) -> AsyncAgentsResource: + from .resources.agents import AsyncAgentsResource - return AsyncDoagentsResource(self) + return AsyncAgentsResource(self) @cached_property def providers(self) 
-> AsyncProvidersResource: @@ -498,10 +498,10 @@ def __init__(self, client: GradientAI) -> None: self._client = client @cached_property - def doagents(self) -> doagents.DoagentsResourceWithRawResponse: - from .resources.doagents import DoagentsResourceWithRawResponse + def agents(self) -> agents.AgentsResourceWithRawResponse: + from .resources.agents import AgentsResourceWithRawResponse - return DoagentsResourceWithRawResponse(self._client.doagents) + return AgentsResourceWithRawResponse(self._client.agents) @cached_property def providers(self) -> providers.ProvidersResourceWithRawResponse: @@ -553,10 +553,10 @@ def __init__(self, client: AsyncGradientAI) -> None: self._client = client @cached_property - def doagents(self) -> doagents.AsyncDoagentsResourceWithRawResponse: - from .resources.doagents import AsyncDoagentsResourceWithRawResponse + def agents(self) -> agents.AsyncAgentsResourceWithRawResponse: + from .resources.agents import AsyncAgentsResourceWithRawResponse - return AsyncDoagentsResourceWithRawResponse(self._client.doagents) + return AsyncAgentsResourceWithRawResponse(self._client.agents) @cached_property def providers(self) -> providers.AsyncProvidersResourceWithRawResponse: @@ -608,10 +608,10 @@ def __init__(self, client: GradientAI) -> None: self._client = client @cached_property - def doagents(self) -> doagents.DoagentsResourceWithStreamingResponse: - from .resources.doagents import DoagentsResourceWithStreamingResponse + def agents(self) -> agents.AgentsResourceWithStreamingResponse: + from .resources.agents import AgentsResourceWithStreamingResponse - return DoagentsResourceWithStreamingResponse(self._client.doagents) + return AgentsResourceWithStreamingResponse(self._client.agents) @cached_property def providers(self) -> providers.ProvidersResourceWithStreamingResponse: @@ -663,10 +663,10 @@ def __init__(self, client: AsyncGradientAI) -> None: self._client = client @cached_property - def doagents(self) -> 
doagents.AsyncDoagentsResourceWithStreamingResponse: - from .resources.doagents import AsyncDoagentsResourceWithStreamingResponse + def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse: + from .resources.agents import AsyncAgentsResourceWithStreamingResponse - return AsyncDoagentsResourceWithStreamingResponse(self._client.doagents) + return AsyncAgentsResourceWithStreamingResponse(self._client.agents) @cached_property def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse: diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 17791967..1763a13e 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -8,6 +8,14 @@ ChatResourceWithStreamingResponse, AsyncChatResourceWithStreamingResponse, ) +from .agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, +) from .models import ( ModelsResource, AsyncModelsResource, @@ -24,14 +32,6 @@ RegionsResourceWithStreamingResponse, AsyncRegionsResourceWithStreamingResponse, ) -from .doagents import ( - DoagentsResource, - AsyncDoagentsResource, - DoagentsResourceWithRawResponse, - AsyncDoagentsResourceWithRawResponse, - DoagentsResourceWithStreamingResponse, - AsyncDoagentsResourceWithStreamingResponse, -) from .inference import ( InferenceResource, AsyncInferenceResource, @@ -66,12 +66,12 @@ ) __all__ = [ - "DoagentsResource", - "AsyncDoagentsResource", - "DoagentsResourceWithRawResponse", - "AsyncDoagentsResourceWithRawResponse", - "DoagentsResourceWithStreamingResponse", - "AsyncDoagentsResourceWithStreamingResponse", + "AgentsResource", + "AsyncAgentsResource", + "AgentsResourceWithRawResponse", + "AsyncAgentsResourceWithRawResponse", + "AgentsResourceWithStreamingResponse", + "AsyncAgentsResourceWithStreamingResponse", "ProvidersResource", 
"AsyncProvidersResource", "ProvidersResourceWithRawResponse", diff --git a/src/gradientai/resources/doagents/__init__.py b/src/gradientai/resources/agents/__init__.py similarity index 84% rename from src/gradientai/resources/doagents/__init__.py rename to src/gradientai/resources/agents/__init__.py index 5ee3485f..f41a0408 100644 --- a/src/gradientai/resources/doagents/__init__.py +++ b/src/gradientai/resources/agents/__init__.py @@ -1,5 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from .agents import ( + AgentsResource, + AsyncAgentsResource, + AgentsResourceWithRawResponse, + AsyncAgentsResourceWithRawResponse, + AgentsResourceWithStreamingResponse, + AsyncAgentsResourceWithStreamingResponse, +) from .api_keys import ( APIKeysResource, AsyncAPIKeysResource, @@ -8,14 +16,6 @@ APIKeysResourceWithStreamingResponse, AsyncAPIKeysResourceWithStreamingResponse, ) -from .doagents import ( - DoagentsResource, - AsyncDoagentsResource, - DoagentsResourceWithRawResponse, - AsyncDoagentsResourceWithRawResponse, - DoagentsResourceWithStreamingResponse, - AsyncDoagentsResourceWithStreamingResponse, -) from .versions import ( VersionsResource, AsyncVersionsResource, @@ -80,10 +80,10 @@ "AsyncChildAgentsResourceWithRawResponse", "ChildAgentsResourceWithStreamingResponse", "AsyncChildAgentsResourceWithStreamingResponse", - "DoagentsResource", - "AsyncDoagentsResource", - "DoagentsResourceWithRawResponse", - "AsyncDoagentsResourceWithRawResponse", - "DoagentsResourceWithStreamingResponse", - "AsyncDoagentsResourceWithStreamingResponse", + "AgentsResource", + "AsyncAgentsResource", + "AgentsResourceWithRawResponse", + "AsyncAgentsResourceWithRawResponse", + "AgentsResourceWithStreamingResponse", + "AsyncAgentsResourceWithStreamingResponse", ] diff --git a/src/gradientai/resources/doagents/doagents.py b/src/gradientai/resources/agents/agents.py similarity index 87% rename from src/gradientai/resources/doagents/doagents.py rename to 
src/gradientai/resources/agents/agents.py index 89951704..78439d33 100644 --- a/src/gradientai/resources/doagents/doagents.py +++ b/src/gradientai/resources/agents/agents.py @@ -9,10 +9,10 @@ from ...types import ( APIRetrievalMethod, APIDeploymentVisibility, - doagent_list_params, - doagent_create_params, - doagent_update_params, - doagent_update_status_params, + agent_list_params, + agent_create_params, + agent_update_params, + agent_update_status_params, ) from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ..._utils import maybe_transform, async_maybe_transform @@ -65,19 +65,19 @@ KnowledgeBasesResourceWithStreamingResponse, AsyncKnowledgeBasesResourceWithStreamingResponse, ) +from ...types.agent_list_response import AgentListResponse from ...types.api_retrieval_method import APIRetrievalMethod -from ...types.doagent_list_response import DoagentListResponse -from ...types.doagent_create_response import DoagentCreateResponse -from ...types.doagent_delete_response import DoagentDeleteResponse -from ...types.doagent_update_response import DoagentUpdateResponse +from ...types.agent_create_response import AgentCreateResponse +from ...types.agent_delete_response import AgentDeleteResponse +from ...types.agent_update_response import AgentUpdateResponse +from ...types.agent_retrieve_response import AgentRetrieveResponse from ...types.api_deployment_visibility import APIDeploymentVisibility -from ...types.doagent_retrieve_response import DoagentRetrieveResponse -from ...types.doagent_update_status_response import DoagentUpdateStatusResponse +from ...types.agent_update_status_response import AgentUpdateStatusResponse -__all__ = ["DoagentsResource", "AsyncDoagentsResource"] +__all__ = ["AgentsResource", "AsyncAgentsResource"] -class DoagentsResource(SyncAPIResource): +class AgentsResource(SyncAPIResource): @cached_property def api_keys(self) -> APIKeysResource: return APIKeysResource(self._client) @@ -99,23 +99,23 @@ def child_agents(self) -> 
ChildAgentsResource: return ChildAgentsResource(self._client) @cached_property - def with_raw_response(self) -> DoagentsResourceWithRawResponse: + def with_raw_response(self) -> AgentsResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ - return DoagentsResourceWithRawResponse(self) + return AgentsResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> DoagentsResourceWithStreamingResponse: + def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ - return DoagentsResourceWithStreamingResponse(self) + return AgentsResourceWithStreamingResponse(self) def create( self, @@ -136,7 +136,7 @@ def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentCreateResponse: + ) -> AgentCreateResponse: """To create a new agent, send a POST request to `/v2/gen-ai/agents`. 
The response @@ -175,12 +175,12 @@ def create( "region": region, "tags": tags, }, - doagent_create_params.DoagentCreateParams, + agent_create_params.AgentCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentCreateResponse, + cast_to=AgentCreateResponse, ) def retrieve( @@ -193,7 +193,7 @@ def retrieve( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentRetrieveResponse: + ) -> AgentRetrieveResponse: """To retrieve details of an agent, GET request to `/v2/gen-ai/agents/{uuid}`. The @@ -217,7 +217,7 @@ def retrieve( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentRetrieveResponse, + cast_to=AgentRetrieveResponse, ) def update( @@ -245,7 +245,7 @@ def update( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentUpdateResponse: + ) -> AgentUpdateResponse: """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. The @@ -303,12 +303,12 @@ def update( "top_p": top_p, "body_uuid": body_uuid, }, - doagent_update_params.DoagentUpdateParams, + agent_update_params.AgentUpdateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentUpdateResponse, + cast_to=AgentUpdateResponse, ) def list( @@ -323,7 +323,7 @@ def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentListResponse: + ) -> AgentListResponse: """ To list all agents, send a GET request to `/v2/gen-ai/agents`. 
@@ -357,10 +357,10 @@ def list( "page": page, "per_page": per_page, }, - doagent_list_params.DoagentListParams, + agent_list_params.AgentListParams, ), ), - cast_to=DoagentListResponse, + cast_to=AgentListResponse, ) def delete( @@ -373,7 +373,7 @@ def delete( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentDeleteResponse: + ) -> AgentDeleteResponse: """ To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. @@ -395,7 +395,7 @@ def delete( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentDeleteResponse, + cast_to=AgentDeleteResponse, ) def update_status( @@ -410,7 +410,7 @@ def update_status( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentUpdateStatusResponse: + ) -> AgentUpdateStatusResponse: """Check whether an agent is public or private. 
To update the agent status, send a @@ -436,16 +436,16 @@ def update_status( "body_uuid": body_uuid, "visibility": visibility, }, - doagent_update_status_params.DoagentUpdateStatusParams, + agent_update_status_params.AgentUpdateStatusParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentUpdateStatusResponse, + cast_to=AgentUpdateStatusResponse, ) -class AsyncDoagentsResource(AsyncAPIResource): +class AsyncAgentsResource(AsyncAPIResource): @cached_property def api_keys(self) -> AsyncAPIKeysResource: return AsyncAPIKeysResource(self._client) @@ -467,23 +467,23 @@ def child_agents(self) -> AsyncChildAgentsResource: return AsyncChildAgentsResource(self._client) @cached_property - def with_raw_response(self) -> AsyncDoagentsResourceWithRawResponse: + def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: """ This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers """ - return AsyncDoagentsResourceWithRawResponse(self) + return AsyncAgentsResourceWithRawResponse(self) @cached_property - def with_streaming_response(self) -> AsyncDoagentsResourceWithStreamingResponse: + def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response """ - return AsyncDoagentsResourceWithStreamingResponse(self) + return AsyncAgentsResourceWithStreamingResponse(self) async def create( self, @@ -504,7 +504,7 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentCreateResponse: + ) -> AgentCreateResponse: """To create a new agent, send a POST request to `/v2/gen-ai/agents`. The response @@ -543,12 +543,12 @@ async def create( "region": region, "tags": tags, }, - doagent_create_params.DoagentCreateParams, + agent_create_params.AgentCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentCreateResponse, + cast_to=AgentCreateResponse, ) async def retrieve( @@ -561,7 +561,7 @@ async def retrieve( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentRetrieveResponse: + ) -> AgentRetrieveResponse: """To retrieve details of an agent, GET request to `/v2/gen-ai/agents/{uuid}`. The @@ -585,7 +585,7 @@ async def retrieve( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentRetrieveResponse, + cast_to=AgentRetrieveResponse, ) async def update( @@ -613,7 +613,7 @@ async def update( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentUpdateResponse: + ) -> AgentUpdateResponse: """To update an agent, send a PUT request to `/v2/gen-ai/agents/{uuid}`. 
The @@ -671,12 +671,12 @@ async def update( "top_p": top_p, "body_uuid": body_uuid, }, - doagent_update_params.DoagentUpdateParams, + agent_update_params.AgentUpdateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentUpdateResponse, + cast_to=AgentUpdateResponse, ) async def list( @@ -691,7 +691,7 @@ async def list( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentListResponse: + ) -> AgentListResponse: """ To list all agents, send a GET request to `/v2/gen-ai/agents`. @@ -725,10 +725,10 @@ async def list( "page": page, "per_page": per_page, }, - doagent_list_params.DoagentListParams, + agent_list_params.AgentListParams, ), ), - cast_to=DoagentListResponse, + cast_to=AgentListResponse, ) async def delete( @@ -741,7 +741,7 @@ async def delete( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentDeleteResponse: + ) -> AgentDeleteResponse: """ To delete an agent, send a DELETE request to `/v2/gen-ai/agents/{uuid}`. @@ -763,7 +763,7 @@ async def delete( options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentDeleteResponse, + cast_to=AgentDeleteResponse, ) async def update_status( @@ -778,7 +778,7 @@ async def update_status( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DoagentUpdateStatusResponse: + ) -> AgentUpdateStatusResponse: """Check whether an agent is public or private. 
To update the agent status, send a @@ -804,186 +804,186 @@ async def update_status( "body_uuid": body_uuid, "visibility": visibility, }, - doagent_update_status_params.DoagentUpdateStatusParams, + agent_update_status_params.AgentUpdateStatusParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), - cast_to=DoagentUpdateStatusResponse, + cast_to=AgentUpdateStatusResponse, ) -class DoagentsResourceWithRawResponse: - def __init__(self, doagents: DoagentsResource) -> None: - self._doagents = doagents +class AgentsResourceWithRawResponse: + def __init__(self, agents: AgentsResource) -> None: + self._agents = agents self.create = to_raw_response_wrapper( - doagents.create, + agents.create, ) self.retrieve = to_raw_response_wrapper( - doagents.retrieve, + agents.retrieve, ) self.update = to_raw_response_wrapper( - doagents.update, + agents.update, ) self.list = to_raw_response_wrapper( - doagents.list, + agents.list, ) self.delete = to_raw_response_wrapper( - doagents.delete, + agents.delete, ) self.update_status = to_raw_response_wrapper( - doagents.update_status, + agents.update_status, ) @cached_property def api_keys(self) -> APIKeysResourceWithRawResponse: - return APIKeysResourceWithRawResponse(self._doagents.api_keys) + return APIKeysResourceWithRawResponse(self._agents.api_keys) @cached_property def functions(self) -> FunctionsResourceWithRawResponse: - return FunctionsResourceWithRawResponse(self._doagents.functions) + return FunctionsResourceWithRawResponse(self._agents.functions) @cached_property def versions(self) -> VersionsResourceWithRawResponse: - return VersionsResourceWithRawResponse(self._doagents.versions) + return VersionsResourceWithRawResponse(self._agents.versions) @cached_property def knowledge_bases(self) -> KnowledgeBasesResourceWithRawResponse: - return KnowledgeBasesResourceWithRawResponse(self._doagents.knowledge_bases) + return 
KnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) @cached_property def child_agents(self) -> ChildAgentsResourceWithRawResponse: - return ChildAgentsResourceWithRawResponse(self._doagents.child_agents) + return ChildAgentsResourceWithRawResponse(self._agents.child_agents) -class AsyncDoagentsResourceWithRawResponse: - def __init__(self, doagents: AsyncDoagentsResource) -> None: - self._doagents = doagents +class AsyncAgentsResourceWithRawResponse: + def __init__(self, agents: AsyncAgentsResource) -> None: + self._agents = agents self.create = async_to_raw_response_wrapper( - doagents.create, + agents.create, ) self.retrieve = async_to_raw_response_wrapper( - doagents.retrieve, + agents.retrieve, ) self.update = async_to_raw_response_wrapper( - doagents.update, + agents.update, ) self.list = async_to_raw_response_wrapper( - doagents.list, + agents.list, ) self.delete = async_to_raw_response_wrapper( - doagents.delete, + agents.delete, ) self.update_status = async_to_raw_response_wrapper( - doagents.update_status, + agents.update_status, ) @cached_property def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse: - return AsyncAPIKeysResourceWithRawResponse(self._doagents.api_keys) + return AsyncAPIKeysResourceWithRawResponse(self._agents.api_keys) @cached_property def functions(self) -> AsyncFunctionsResourceWithRawResponse: - return AsyncFunctionsResourceWithRawResponse(self._doagents.functions) + return AsyncFunctionsResourceWithRawResponse(self._agents.functions) @cached_property def versions(self) -> AsyncVersionsResourceWithRawResponse: - return AsyncVersionsResourceWithRawResponse(self._doagents.versions) + return AsyncVersionsResourceWithRawResponse(self._agents.versions) @cached_property def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithRawResponse: - return AsyncKnowledgeBasesResourceWithRawResponse(self._doagents.knowledge_bases) + return AsyncKnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases) @cached_property 
def child_agents(self) -> AsyncChildAgentsResourceWithRawResponse: - return AsyncChildAgentsResourceWithRawResponse(self._doagents.child_agents) + return AsyncChildAgentsResourceWithRawResponse(self._agents.child_agents) -class DoagentsResourceWithStreamingResponse: - def __init__(self, doagents: DoagentsResource) -> None: - self._doagents = doagents +class AgentsResourceWithStreamingResponse: + def __init__(self, agents: AgentsResource) -> None: + self._agents = agents self.create = to_streamed_response_wrapper( - doagents.create, + agents.create, ) self.retrieve = to_streamed_response_wrapper( - doagents.retrieve, + agents.retrieve, ) self.update = to_streamed_response_wrapper( - doagents.update, + agents.update, ) self.list = to_streamed_response_wrapper( - doagents.list, + agents.list, ) self.delete = to_streamed_response_wrapper( - doagents.delete, + agents.delete, ) self.update_status = to_streamed_response_wrapper( - doagents.update_status, + agents.update_status, ) @cached_property def api_keys(self) -> APIKeysResourceWithStreamingResponse: - return APIKeysResourceWithStreamingResponse(self._doagents.api_keys) + return APIKeysResourceWithStreamingResponse(self._agents.api_keys) @cached_property def functions(self) -> FunctionsResourceWithStreamingResponse: - return FunctionsResourceWithStreamingResponse(self._doagents.functions) + return FunctionsResourceWithStreamingResponse(self._agents.functions) @cached_property def versions(self) -> VersionsResourceWithStreamingResponse: - return VersionsResourceWithStreamingResponse(self._doagents.versions) + return VersionsResourceWithStreamingResponse(self._agents.versions) @cached_property def knowledge_bases(self) -> KnowledgeBasesResourceWithStreamingResponse: - return KnowledgeBasesResourceWithStreamingResponse(self._doagents.knowledge_bases) + return KnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) @cached_property def child_agents(self) -> ChildAgentsResourceWithStreamingResponse: - 
return ChildAgentsResourceWithStreamingResponse(self._doagents.child_agents) + return ChildAgentsResourceWithStreamingResponse(self._agents.child_agents) -class AsyncDoagentsResourceWithStreamingResponse: - def __init__(self, doagents: AsyncDoagentsResource) -> None: - self._doagents = doagents +class AsyncAgentsResourceWithStreamingResponse: + def __init__(self, agents: AsyncAgentsResource) -> None: + self._agents = agents self.create = async_to_streamed_response_wrapper( - doagents.create, + agents.create, ) self.retrieve = async_to_streamed_response_wrapper( - doagents.retrieve, + agents.retrieve, ) self.update = async_to_streamed_response_wrapper( - doagents.update, + agents.update, ) self.list = async_to_streamed_response_wrapper( - doagents.list, + agents.list, ) self.delete = async_to_streamed_response_wrapper( - doagents.delete, + agents.delete, ) self.update_status = async_to_streamed_response_wrapper( - doagents.update_status, + agents.update_status, ) @cached_property def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse: - return AsyncAPIKeysResourceWithStreamingResponse(self._doagents.api_keys) + return AsyncAPIKeysResourceWithStreamingResponse(self._agents.api_keys) @cached_property def functions(self) -> AsyncFunctionsResourceWithStreamingResponse: - return AsyncFunctionsResourceWithStreamingResponse(self._doagents.functions) + return AsyncFunctionsResourceWithStreamingResponse(self._agents.functions) @cached_property def versions(self) -> AsyncVersionsResourceWithStreamingResponse: - return AsyncVersionsResourceWithStreamingResponse(self._doagents.versions) + return AsyncVersionsResourceWithStreamingResponse(self._agents.versions) @cached_property def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse: - return AsyncKnowledgeBasesResourceWithStreamingResponse(self._doagents.knowledge_bases) + return AsyncKnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases) @cached_property def child_agents(self) 
-> AsyncChildAgentsResourceWithStreamingResponse: - return AsyncChildAgentsResourceWithStreamingResponse(self._doagents.child_agents) + return AsyncChildAgentsResourceWithStreamingResponse(self._agents.child_agents) diff --git a/src/gradientai/resources/doagents/api_keys.py b/src/gradientai/resources/agents/api_keys.py similarity index 98% rename from src/gradientai/resources/doagents/api_keys.py rename to src/gradientai/resources/agents/api_keys.py index c55249be..155e3adc 100644 --- a/src/gradientai/resources/doagents/api_keys.py +++ b/src/gradientai/resources/agents/api_keys.py @@ -15,12 +15,12 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.doagents import api_key_list_params, api_key_create_params, api_key_update_params -from ...types.doagents.api_key_list_response import APIKeyListResponse -from ...types.doagents.api_key_create_response import APIKeyCreateResponse -from ...types.doagents.api_key_delete_response import APIKeyDeleteResponse -from ...types.doagents.api_key_update_response import APIKeyUpdateResponse -from ...types.doagents.api_key_regenerate_response import APIKeyRegenerateResponse +from ...types.agents import api_key_list_params, api_key_create_params, api_key_update_params +from ...types.agents.api_key_list_response import APIKeyListResponse +from ...types.agents.api_key_create_response import APIKeyCreateResponse +from ...types.agents.api_key_delete_response import APIKeyDeleteResponse +from ...types.agents.api_key_update_response import APIKeyUpdateResponse +from ...types.agents.api_key_regenerate_response import APIKeyRegenerateResponse __all__ = ["APIKeysResource", "AsyncAPIKeysResource"] diff --git a/src/gradientai/resources/doagents/child_agents.py b/src/gradientai/resources/agents/child_agents.py similarity index 98% rename from src/gradientai/resources/doagents/child_agents.py rename to src/gradientai/resources/agents/child_agents.py index 6e8abfb7..9031d8ce 100644 --- 
a/src/gradientai/resources/doagents/child_agents.py +++ b/src/gradientai/resources/agents/child_agents.py @@ -15,11 +15,11 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.doagents import child_agent_add_params, child_agent_update_params -from ...types.doagents.child_agent_add_response import ChildAgentAddResponse -from ...types.doagents.child_agent_view_response import ChildAgentViewResponse -from ...types.doagents.child_agent_delete_response import ChildAgentDeleteResponse -from ...types.doagents.child_agent_update_response import ChildAgentUpdateResponse +from ...types.agents import child_agent_add_params, child_agent_update_params +from ...types.agents.child_agent_add_response import ChildAgentAddResponse +from ...types.agents.child_agent_view_response import ChildAgentViewResponse +from ...types.agents.child_agent_delete_response import ChildAgentDeleteResponse +from ...types.agents.child_agent_update_response import ChildAgentUpdateResponse __all__ = ["ChildAgentsResource", "AsyncChildAgentsResource"] diff --git a/src/gradientai/resources/doagents/functions.py b/src/gradientai/resources/agents/functions.py similarity index 98% rename from src/gradientai/resources/doagents/functions.py rename to src/gradientai/resources/agents/functions.py index 65ab2801..67a811cc 100644 --- a/src/gradientai/resources/doagents/functions.py +++ b/src/gradientai/resources/agents/functions.py @@ -15,10 +15,10 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.doagents import function_create_params, function_update_params -from ...types.doagents.function_create_response import FunctionCreateResponse -from ...types.doagents.function_delete_response import FunctionDeleteResponse -from ...types.doagents.function_update_response import FunctionUpdateResponse +from ...types.agents import function_create_params, function_update_params +from 
...types.agents.function_create_response import FunctionCreateResponse +from ...types.agents.function_delete_response import FunctionDeleteResponse +from ...types.agents.function_update_response import FunctionUpdateResponse __all__ = ["FunctionsResource", "AsyncFunctionsResource"] diff --git a/src/gradientai/resources/doagents/knowledge_bases.py b/src/gradientai/resources/agents/knowledge_bases.py similarity index 98% rename from src/gradientai/resources/doagents/knowledge_bases.py rename to src/gradientai/resources/agents/knowledge_bases.py index e806d7a2..3b9b0cd2 100644 --- a/src/gradientai/resources/doagents/knowledge_bases.py +++ b/src/gradientai/resources/agents/knowledge_bases.py @@ -14,8 +14,8 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.doagents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput -from ...types.doagents.knowledge_base_detach_response import KnowledgeBaseDetachResponse +from ...types.agents.api_link_knowledge_base_output import APILinkKnowledgeBaseOutput +from ...types.agents.knowledge_base_detach_response import KnowledgeBaseDetachResponse __all__ = ["KnowledgeBasesResource", "AsyncKnowledgeBasesResource"] diff --git a/src/gradientai/resources/doagents/versions.py b/src/gradientai/resources/agents/versions.py similarity index 98% rename from src/gradientai/resources/doagents/versions.py rename to src/gradientai/resources/agents/versions.py index 6301bc0a..86dbf99f 100644 --- a/src/gradientai/resources/doagents/versions.py +++ b/src/gradientai/resources/agents/versions.py @@ -15,9 +15,9 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.doagents import version_list_params, version_update_params -from ...types.doagents.version_list_response import VersionListResponse -from ...types.doagents.version_update_response import VersionUpdateResponse +from ...types.agents import version_list_params, version_update_params 
+from ...types.agents.version_list_response import VersionListResponse +from ...types.agents.version_update_response import VersionUpdateResponse __all__ = ["VersionsResource", "AsyncVersionsResource"] diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index 09d071f0..5ee961c6 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -5,34 +5,34 @@ from .api_agent import APIAgent as APIAgent from .api_model import APIModel as APIModel from .api_indexing_job import APIIndexingJob as APIIndexingJob +from .agent_list_params import AgentListParams as AgentListParams from .model_list_params import ModelListParams as ModelListParams from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase from .region_list_params import RegionListParams as RegionListParams -from .doagent_list_params import DoagentListParams as DoagentListParams +from .agent_create_params import AgentCreateParams as AgentCreateParams +from .agent_list_response import AgentListResponse as AgentListResponse +from .agent_update_params import AgentUpdateParams as AgentUpdateParams from .model_list_response import ModelListResponse as ModelListResponse from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod from .region_list_response import RegionListResponse as RegionListResponse -from .doagent_create_params import DoagentCreateParams as DoagentCreateParams -from .doagent_list_response import DoagentListResponse as DoagentListResponse -from .doagent_update_params import DoagentUpdateParams as DoagentUpdateParams +from .agent_create_response import AgentCreateResponse as AgentCreateResponse +from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse +from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo +from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse from 
.api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo -from .doagent_create_response import DoagentCreateResponse as DoagentCreateResponse -from .doagent_delete_response import DoagentDeleteResponse as DoagentDeleteResponse -from .doagent_update_response import DoagentUpdateResponse as DoagentUpdateResponse from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility -from .doagent_retrieve_response import DoagentRetrieveResponse as DoagentRetrieveResponse +from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams -from .doagent_update_status_params import DoagentUpdateStatusParams as DoagentUpdateStatusParams +from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams -from .doagent_update_status_response import DoagentUpdateStatusResponse as DoagentUpdateStatusResponse from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse from .knowledge_base_delete_response import 
KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse diff --git a/src/gradientai/types/doagent_create_params.py b/src/gradientai/types/agent_create_params.py similarity index 90% rename from src/gradientai/types/doagent_create_params.py rename to src/gradientai/types/agent_create_params.py index b5b6e72d..58b99df7 100644 --- a/src/gradientai/types/doagent_create_params.py +++ b/src/gradientai/types/agent_create_params.py @@ -7,10 +7,10 @@ from .._utils import PropertyInfo -__all__ = ["DoagentCreateParams"] +__all__ = ["AgentCreateParams"] -class DoagentCreateParams(TypedDict, total=False): +class AgentCreateParams(TypedDict, total=False): anthropic_key_uuid: str description: str diff --git a/src/gradientai/types/doagent_update_response.py b/src/gradientai/types/agent_create_response.py similarity index 77% rename from src/gradientai/types/doagent_update_response.py rename to src/gradientai/types/agent_create_response.py index 4d48bee7..48545fe9 100644 --- a/src/gradientai/types/doagent_update_response.py +++ b/src/gradientai/types/agent_create_response.py @@ -6,10 +6,10 @@ from .._models import BaseModel -__all__ = ["DoagentUpdateResponse"] +__all__ = ["AgentCreateResponse"] -class DoagentUpdateResponse(BaseModel): +class AgentCreateResponse(BaseModel): agent: Optional["APIAgent"] = None diff --git a/src/gradientai/types/doagent_create_response.py b/src/gradientai/types/agent_delete_response.py similarity index 77% rename from src/gradientai/types/doagent_create_response.py rename to src/gradientai/types/agent_delete_response.py index 2d171436..eb1d440d 100644 --- a/src/gradientai/types/doagent_create_response.py +++ b/src/gradientai/types/agent_delete_response.py @@ -6,10 +6,10 @@ from .._models import BaseModel -__all__ = ["DoagentCreateResponse"] +__all__ = ["AgentDeleteResponse"] -class DoagentCreateResponse(BaseModel): +class AgentDeleteResponse(BaseModel): agent: Optional["APIAgent"] = None diff --git a/src/gradientai/types/doagent_list_params.py 
b/src/gradientai/types/agent_list_params.py similarity index 79% rename from src/gradientai/types/doagent_list_params.py rename to src/gradientai/types/agent_list_params.py index a9b3fb2b..e13a10c9 100644 --- a/src/gradientai/types/doagent_list_params.py +++ b/src/gradientai/types/agent_list_params.py @@ -4,10 +4,10 @@ from typing_extensions import TypedDict -__all__ = ["DoagentListParams"] +__all__ = ["AgentListParams"] -class DoagentListParams(TypedDict, total=False): +class AgentListParams(TypedDict, total=False): only_deployed: bool """only list agents that are deployed.""" diff --git a/src/gradientai/types/doagent_list_response.py b/src/gradientai/types/agent_list_response.py similarity index 98% rename from src/gradientai/types/doagent_list_response.py rename to src/gradientai/types/agent_list_response.py index 65c2b076..6af9cd51 100644 --- a/src/gradientai/types/doagent_list_response.py +++ b/src/gradientai/types/agent_list_response.py @@ -5,14 +5,14 @@ from typing_extensions import Literal from .._models import BaseModel -from .doagents.api_meta import APIMeta +from .agents.api_meta import APIMeta +from .agents.api_links import APILinks from .api_knowledge_base import APIKnowledgeBase -from .doagents.api_links import APILinks from .api_retrieval_method import APIRetrievalMethod from .api_deployment_visibility import APIDeploymentVisibility __all__ = [ - "DoagentListResponse", + "AgentListResponse", "Agent", "AgentChatbot", "AgentChatbotIdentifier", @@ -323,7 +323,7 @@ class Agent(BaseModel): uuid: Optional[str] = None -class DoagentListResponse(BaseModel): +class AgentListResponse(BaseModel): agents: Optional[List[Agent]] = None links: Optional[APILinks] = None diff --git a/src/gradientai/types/doagent_delete_response.py b/src/gradientai/types/agent_retrieve_response.py similarity index 77% rename from src/gradientai/types/doagent_delete_response.py rename to src/gradientai/types/agent_retrieve_response.py index 5d90ba17..2eed88af 100644 --- 
a/src/gradientai/types/doagent_delete_response.py +++ b/src/gradientai/types/agent_retrieve_response.py @@ -6,10 +6,10 @@ from .._models import BaseModel -__all__ = ["DoagentDeleteResponse"] +__all__ = ["AgentRetrieveResponse"] -class DoagentDeleteResponse(BaseModel): +class AgentRetrieveResponse(BaseModel): agent: Optional["APIAgent"] = None diff --git a/src/gradientai/types/doagent_update_params.py b/src/gradientai/types/agent_update_params.py similarity index 95% rename from src/gradientai/types/doagent_update_params.py rename to src/gradientai/types/agent_update_params.py index a8598f5e..85f9a9c2 100644 --- a/src/gradientai/types/doagent_update_params.py +++ b/src/gradientai/types/agent_update_params.py @@ -8,10 +8,10 @@ from .._utils import PropertyInfo from .api_retrieval_method import APIRetrievalMethod -__all__ = ["DoagentUpdateParams"] +__all__ = ["AgentUpdateParams"] -class DoagentUpdateParams(TypedDict, total=False): +class AgentUpdateParams(TypedDict, total=False): anthropic_key_uuid: str description: str diff --git a/src/gradientai/types/doagent_retrieve_response.py b/src/gradientai/types/agent_update_response.py similarity index 76% rename from src/gradientai/types/doagent_retrieve_response.py rename to src/gradientai/types/agent_update_response.py index 9fb0a722..2948aa1c 100644 --- a/src/gradientai/types/doagent_retrieve_response.py +++ b/src/gradientai/types/agent_update_response.py @@ -6,10 +6,10 @@ from .._models import BaseModel -__all__ = ["DoagentRetrieveResponse"] +__all__ = ["AgentUpdateResponse"] -class DoagentRetrieveResponse(BaseModel): +class AgentUpdateResponse(BaseModel): agent: Optional["APIAgent"] = None diff --git a/src/gradientai/types/doagent_update_status_params.py b/src/gradientai/types/agent_update_status_params.py similarity index 79% rename from src/gradientai/types/doagent_update_status_params.py rename to src/gradientai/types/agent_update_status_params.py index 3bd0c539..a0cdc0b9 100644 --- 
a/src/gradientai/types/doagent_update_status_params.py +++ b/src/gradientai/types/agent_update_status_params.py @@ -7,10 +7,10 @@ from .._utils import PropertyInfo from .api_deployment_visibility import APIDeploymentVisibility -__all__ = ["DoagentUpdateStatusParams"] +__all__ = ["AgentUpdateStatusParams"] -class DoagentUpdateStatusParams(TypedDict, total=False): +class AgentUpdateStatusParams(TypedDict, total=False): body_uuid: Annotated[str, PropertyInfo(alias="uuid")] visibility: APIDeploymentVisibility diff --git a/src/gradientai/types/agent_update_status_response.py b/src/gradientai/types/agent_update_status_response.py new file mode 100644 index 00000000..b200f99d --- /dev/null +++ b/src/gradientai/types/agent_update_status_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["AgentUpdateStatusResponse"] + + +class AgentUpdateStatusResponse(BaseModel): + agent: Optional["APIAgent"] = None + + +from .api_agent import APIAgent diff --git a/src/gradientai/types/doagents/__init__.py b/src/gradientai/types/agents/__init__.py similarity index 100% rename from src/gradientai/types/doagents/__init__.py rename to src/gradientai/types/agents/__init__.py diff --git a/src/gradientai/types/doagents/api_key_create_params.py b/src/gradientai/types/agents/api_key_create_params.py similarity index 100% rename from src/gradientai/types/doagents/api_key_create_params.py rename to src/gradientai/types/agents/api_key_create_params.py diff --git a/src/gradientai/types/doagents/api_key_create_response.py b/src/gradientai/types/agents/api_key_create_response.py similarity index 100% rename from src/gradientai/types/doagents/api_key_create_response.py rename to src/gradientai/types/agents/api_key_create_response.py diff --git a/src/gradientai/types/doagents/api_key_delete_response.py 
b/src/gradientai/types/agents/api_key_delete_response.py similarity index 100% rename from src/gradientai/types/doagents/api_key_delete_response.py rename to src/gradientai/types/agents/api_key_delete_response.py diff --git a/src/gradientai/types/doagents/api_key_list_params.py b/src/gradientai/types/agents/api_key_list_params.py similarity index 100% rename from src/gradientai/types/doagents/api_key_list_params.py rename to src/gradientai/types/agents/api_key_list_params.py diff --git a/src/gradientai/types/doagents/api_key_list_response.py b/src/gradientai/types/agents/api_key_list_response.py similarity index 100% rename from src/gradientai/types/doagents/api_key_list_response.py rename to src/gradientai/types/agents/api_key_list_response.py diff --git a/src/gradientai/types/doagents/api_key_regenerate_response.py b/src/gradientai/types/agents/api_key_regenerate_response.py similarity index 100% rename from src/gradientai/types/doagents/api_key_regenerate_response.py rename to src/gradientai/types/agents/api_key_regenerate_response.py diff --git a/src/gradientai/types/doagents/api_key_update_params.py b/src/gradientai/types/agents/api_key_update_params.py similarity index 100% rename from src/gradientai/types/doagents/api_key_update_params.py rename to src/gradientai/types/agents/api_key_update_params.py diff --git a/src/gradientai/types/doagents/api_key_update_response.py b/src/gradientai/types/agents/api_key_update_response.py similarity index 100% rename from src/gradientai/types/doagents/api_key_update_response.py rename to src/gradientai/types/agents/api_key_update_response.py diff --git a/src/gradientai/types/doagents/api_link_knowledge_base_output.py b/src/gradientai/types/agents/api_link_knowledge_base_output.py similarity index 100% rename from src/gradientai/types/doagents/api_link_knowledge_base_output.py rename to src/gradientai/types/agents/api_link_knowledge_base_output.py diff --git a/src/gradientai/types/doagents/api_links.py 
b/src/gradientai/types/agents/api_links.py similarity index 100% rename from src/gradientai/types/doagents/api_links.py rename to src/gradientai/types/agents/api_links.py diff --git a/src/gradientai/types/doagents/api_meta.py b/src/gradientai/types/agents/api_meta.py similarity index 100% rename from src/gradientai/types/doagents/api_meta.py rename to src/gradientai/types/agents/api_meta.py diff --git a/src/gradientai/types/doagents/child_agent_add_params.py b/src/gradientai/types/agents/child_agent_add_params.py similarity index 100% rename from src/gradientai/types/doagents/child_agent_add_params.py rename to src/gradientai/types/agents/child_agent_add_params.py diff --git a/src/gradientai/types/doagents/child_agent_add_response.py b/src/gradientai/types/agents/child_agent_add_response.py similarity index 100% rename from src/gradientai/types/doagents/child_agent_add_response.py rename to src/gradientai/types/agents/child_agent_add_response.py diff --git a/src/gradientai/types/doagents/child_agent_delete_response.py b/src/gradientai/types/agents/child_agent_delete_response.py similarity index 100% rename from src/gradientai/types/doagents/child_agent_delete_response.py rename to src/gradientai/types/agents/child_agent_delete_response.py diff --git a/src/gradientai/types/doagents/child_agent_update_params.py b/src/gradientai/types/agents/child_agent_update_params.py similarity index 100% rename from src/gradientai/types/doagents/child_agent_update_params.py rename to src/gradientai/types/agents/child_agent_update_params.py diff --git a/src/gradientai/types/doagents/child_agent_update_response.py b/src/gradientai/types/agents/child_agent_update_response.py similarity index 100% rename from src/gradientai/types/doagents/child_agent_update_response.py rename to src/gradientai/types/agents/child_agent_update_response.py diff --git a/src/gradientai/types/doagents/child_agent_view_response.py b/src/gradientai/types/agents/child_agent_view_response.py similarity index 
100% rename from src/gradientai/types/doagents/child_agent_view_response.py rename to src/gradientai/types/agents/child_agent_view_response.py diff --git a/src/gradientai/types/doagents/function_create_params.py b/src/gradientai/types/agents/function_create_params.py similarity index 100% rename from src/gradientai/types/doagents/function_create_params.py rename to src/gradientai/types/agents/function_create_params.py diff --git a/src/gradientai/types/doagents/function_create_response.py b/src/gradientai/types/agents/function_create_response.py similarity index 100% rename from src/gradientai/types/doagents/function_create_response.py rename to src/gradientai/types/agents/function_create_response.py diff --git a/src/gradientai/types/doagents/function_delete_response.py b/src/gradientai/types/agents/function_delete_response.py similarity index 100% rename from src/gradientai/types/doagents/function_delete_response.py rename to src/gradientai/types/agents/function_delete_response.py diff --git a/src/gradientai/types/doagents/function_update_params.py b/src/gradientai/types/agents/function_update_params.py similarity index 100% rename from src/gradientai/types/doagents/function_update_params.py rename to src/gradientai/types/agents/function_update_params.py diff --git a/src/gradientai/types/doagents/function_update_response.py b/src/gradientai/types/agents/function_update_response.py similarity index 100% rename from src/gradientai/types/doagents/function_update_response.py rename to src/gradientai/types/agents/function_update_response.py diff --git a/src/gradientai/types/doagents/knowledge_base_detach_response.py b/src/gradientai/types/agents/knowledge_base_detach_response.py similarity index 100% rename from src/gradientai/types/doagents/knowledge_base_detach_response.py rename to src/gradientai/types/agents/knowledge_base_detach_response.py diff --git a/src/gradientai/types/doagents/version_list_params.py b/src/gradientai/types/agents/version_list_params.py 
similarity index 100% rename from src/gradientai/types/doagents/version_list_params.py rename to src/gradientai/types/agents/version_list_params.py diff --git a/src/gradientai/types/doagents/version_list_response.py b/src/gradientai/types/agents/version_list_response.py similarity index 100% rename from src/gradientai/types/doagents/version_list_response.py rename to src/gradientai/types/agents/version_list_response.py diff --git a/src/gradientai/types/doagents/version_update_params.py b/src/gradientai/types/agents/version_update_params.py similarity index 100% rename from src/gradientai/types/doagents/version_update_params.py rename to src/gradientai/types/agents/version_update_params.py diff --git a/src/gradientai/types/doagents/version_update_response.py b/src/gradientai/types/agents/version_update_response.py similarity index 100% rename from src/gradientai/types/doagents/version_update_response.py rename to src/gradientai/types/agents/version_update_response.py diff --git a/src/gradientai/types/doagent_update_status_response.py b/src/gradientai/types/doagent_update_status_response.py deleted file mode 100644 index b31c1e99..00000000 --- a/src/gradientai/types/doagent_update_status_response.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["DoagentUpdateStatusResponse"] - - -class DoagentUpdateStatusResponse(BaseModel): - agent: Optional["APIAgent"] = None - - -from .api_agent import APIAgent diff --git a/src/gradientai/types/indexing_job_list_response.py b/src/gradientai/types/indexing_job_list_response.py index dc94b966..1379cc55 100644 --- a/src/gradientai/types/indexing_job_list_response.py +++ b/src/gradientai/types/indexing_job_list_response.py @@ -3,9 +3,9 @@ from typing import List, Optional from .._models import BaseModel +from .agents.api_meta import APIMeta +from .agents.api_links import APILinks from .api_indexing_job import APIIndexingJob -from .doagents.api_meta import APIMeta -from .doagents.api_links import APILinks __all__ = ["IndexingJobListResponse"] diff --git a/src/gradientai/types/inference/api_key_list_response.py b/src/gradientai/types/inference/api_key_list_response.py index 9cbc4bd5..535e2f96 100644 --- a/src/gradientai/types/inference/api_key_list_response.py +++ b/src/gradientai/types/inference/api_key_list_response.py @@ -3,8 +3,8 @@ from typing import List, Optional from ..._models import BaseModel -from ..doagents.api_meta import APIMeta -from ..doagents.api_links import APILinks +from ..agents.api_meta import APIMeta +from ..agents.api_links import APILinks from .api_model_api_key_info import APIModelAPIKeyInfo __all__ = ["APIKeyListResponse"] diff --git a/src/gradientai/types/knowledge_base_list_response.py b/src/gradientai/types/knowledge_base_list_response.py index 4fa7536d..09ca1ad3 100644 --- a/src/gradientai/types/knowledge_base_list_response.py +++ b/src/gradientai/types/knowledge_base_list_response.py @@ -3,9 +3,9 @@ from typing import List, Optional from .._models import BaseModel -from .doagents.api_meta import APIMeta +from .agents.api_meta import APIMeta +from .agents.api_links import APILinks from .api_knowledge_base import APIKnowledgeBase 
-from .doagents.api_links import APILinks __all__ = ["KnowledgeBaseListResponse"] diff --git a/src/gradientai/types/knowledge_bases/data_source_list_response.py b/src/gradientai/types/knowledge_bases/data_source_list_response.py index d0c16c12..78246ce1 100644 --- a/src/gradientai/types/knowledge_bases/data_source_list_response.py +++ b/src/gradientai/types/knowledge_bases/data_source_list_response.py @@ -3,8 +3,8 @@ from typing import List, Optional from ..._models import BaseModel -from ..doagents.api_meta import APIMeta -from ..doagents.api_links import APILinks +from ..agents.api_meta import APIMeta +from ..agents.api_links import APILinks from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource __all__ = ["DataSourceListResponse"] diff --git a/src/gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py index 1eb8f907..e6f5fad5 100644 --- a/src/gradientai/types/model_list_response.py +++ b/src/gradientai/types/model_list_response.py @@ -4,8 +4,8 @@ from .._models import BaseModel from .api_model import APIModel -from .doagents.api_meta import APIMeta -from .doagents.api_links import APILinks +from .agents.api_meta import APIMeta +from .agents.api_links import APILinks __all__ = ["ModelListResponse"] diff --git a/src/gradientai/types/providers/anthropic/key_list_agents_response.py b/src/gradientai/types/providers/anthropic/key_list_agents_response.py index 174b5ea0..ba6ca946 100644 --- a/src/gradientai/types/providers/anthropic/key_list_agents_response.py +++ b/src/gradientai/types/providers/anthropic/key_list_agents_response.py @@ -5,8 +5,8 @@ from typing import List, Optional from ...._models import BaseModel -from ...doagents.api_meta import APIMeta -from ...doagents.api_links import APILinks +from ...agents.api_meta import APIMeta +from ...agents.api_links import APILinks __all__ = ["KeyListAgentsResponse"] diff --git a/src/gradientai/types/providers/anthropic/key_list_response.py 
b/src/gradientai/types/providers/anthropic/key_list_response.py index 7699e23b..d0b84e96 100644 --- a/src/gradientai/types/providers/anthropic/key_list_response.py +++ b/src/gradientai/types/providers/anthropic/key_list_response.py @@ -3,8 +3,8 @@ from typing import List, Optional from ...._models import BaseModel -from ...doagents.api_meta import APIMeta -from ...doagents.api_links import APILinks +from ...agents.api_meta import APIMeta +from ...agents.api_links import APILinks from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo __all__ = ["KeyListResponse"] diff --git a/src/gradientai/types/providers/openai/key_list_response.py b/src/gradientai/types/providers/openai/key_list_response.py index 68a74cd1..c263cba3 100644 --- a/src/gradientai/types/providers/openai/key_list_response.py +++ b/src/gradientai/types/providers/openai/key_list_response.py @@ -3,8 +3,8 @@ from typing import List, Optional from ...._models import BaseModel -from ...doagents.api_meta import APIMeta -from ...doagents.api_links import APILinks +from ...agents.api_meta import APIMeta +from ...agents.api_links import APILinks from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo __all__ = ["KeyListResponse"] diff --git a/src/gradientai/types/providers/openai/key_retrieve_agents_response.py b/src/gradientai/types/providers/openai/key_retrieve_agents_response.py index 9393fe08..f42edea6 100644 --- a/src/gradientai/types/providers/openai/key_retrieve_agents_response.py +++ b/src/gradientai/types/providers/openai/key_retrieve_agents_response.py @@ -5,8 +5,8 @@ from typing import List, Optional from ...._models import BaseModel -from ...doagents.api_meta import APIMeta -from ...doagents.api_links import APILinks +from ...agents.api_meta import APIMeta +from ...agents.api_links import APILinks __all__ = ["KeyRetrieveAgentsResponse"] diff --git a/tests/api_resources/doagents/__init__.py b/tests/api_resources/agents/__init__.py similarity index 100% rename from 
tests/api_resources/doagents/__init__.py rename to tests/api_resources/agents/__init__.py diff --git a/tests/api_resources/doagents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py similarity index 84% rename from tests/api_resources/doagents/test_api_keys.py rename to tests/api_resources/agents/test_api_keys.py index dd654e83..e8489258 100644 --- a/tests/api_resources/doagents/test_api_keys.py +++ b/tests/api_resources/agents/test_api_keys.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.doagents import ( +from gradientai.types.agents import ( APIKeyListResponse, APIKeyCreateResponse, APIKeyDeleteResponse, @@ -26,7 +26,7 @@ class TestAPIKeys: @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - api_key = client.doagents.api_keys.create( + api_key = client.agents.api_keys.create( path_agent_uuid="agent_uuid", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -34,7 +34,7 @@ def test_method_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - api_key = client.doagents.api_keys.create( + api_key = client.agents.api_keys.create( path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", name="name", @@ -44,7 +44,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.doagents.api_keys.with_raw_response.create( + response = client.agents.api_keys.with_raw_response.create( path_agent_uuid="agent_uuid", ) @@ -56,7 +56,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.doagents.api_keys.with_streaming_response.create( + with 
client.agents.api_keys.with_streaming_response.create( path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -71,14 +71,14 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @parametrize def test_path_params_create(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.doagents.api_keys.with_raw_response.create( + client.agents.api_keys.with_raw_response.create( path_agent_uuid="", ) @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - api_key = client.doagents.api_keys.update( + api_key = client.agents.api_keys.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) @@ -87,7 +87,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - api_key = client.doagents.api_keys.update( + api_key = client.agents.api_keys.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", @@ -99,7 +99,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.doagents.api_keys.with_raw_response.update( + response = client.agents.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) @@ -112,7 +112,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.doagents.api_keys.with_streaming_response.update( + with client.agents.api_keys.with_streaming_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) as response: @@ -128,13 +128,13 @@ def test_streaming_response_update(self, client: GradientAI) -> 
None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.doagents.api_keys.with_raw_response.update( + client.agents.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - client.doagents.api_keys.with_raw_response.update( + client.agents.api_keys.with_raw_response.update( path_api_key_uuid="", path_agent_uuid="agent_uuid", ) @@ -142,7 +142,7 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - api_key = client.doagents.api_keys.list( + api_key = client.agents.api_keys.list( agent_uuid="agent_uuid", ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @@ -150,7 +150,7 @@ def test_method_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - api_key = client.doagents.api_keys.list( + api_key = client.agents.api_keys.list( agent_uuid="agent_uuid", page=0, per_page=0, @@ -160,7 +160,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.doagents.api_keys.with_raw_response.list( + response = client.agents.api_keys.with_raw_response.list( agent_uuid="agent_uuid", ) @@ -172,7 +172,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.doagents.api_keys.with_streaming_response.list( + with client.agents.api_keys.with_streaming_response.list( agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -187,14 
+187,14 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @parametrize def test_path_params_list(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.doagents.api_keys.with_raw_response.list( + client.agents.api_keys.with_raw_response.list( agent_uuid="", ) @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - api_key = client.doagents.api_keys.delete( + api_key = client.agents.api_keys.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -203,7 +203,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.doagents.api_keys.with_raw_response.delete( + response = client.agents.api_keys.with_raw_response.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -216,7 +216,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.doagents.api_keys.with_streaming_response.delete( + with client.agents.api_keys.with_streaming_response.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) as response: @@ -232,13 +232,13 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.doagents.api_keys.with_raw_response.delete( + client.agents.api_keys.with_raw_response.delete( api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.doagents.api_keys.with_raw_response.delete( + client.agents.api_keys.with_raw_response.delete( api_key_uuid="", 
agent_uuid="agent_uuid", ) @@ -246,7 +246,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_regenerate(self, client: GradientAI) -> None: - api_key = client.doagents.api_keys.regenerate( + api_key = client.agents.api_keys.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -255,7 +255,7 @@ def test_method_regenerate(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_regenerate(self, client: GradientAI) -> None: - response = client.doagents.api_keys.with_raw_response.regenerate( + response = client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -268,7 +268,7 @@ def test_raw_response_regenerate(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_regenerate(self, client: GradientAI) -> None: - with client.doagents.api_keys.with_streaming_response.regenerate( + with client.agents.api_keys.with_streaming_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) as response: @@ -284,13 +284,13 @@ def test_streaming_response_regenerate(self, client: GradientAI) -> None: @parametrize def test_path_params_regenerate(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.doagents.api_keys.with_raw_response.regenerate( + client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - client.doagents.api_keys.with_raw_response.regenerate( + client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="", agent_uuid="agent_uuid", ) @@ -302,7 +302,7 @@ class TestAsyncAPIKeys: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - api_key = await 
async_client.doagents.api_keys.create( + api_key = await async_client.agents.api_keys.create( path_agent_uuid="agent_uuid", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -310,7 +310,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.doagents.api_keys.create( + api_key = await async_client.agents.api_keys.create( path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", name="name", @@ -320,7 +320,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.api_keys.with_raw_response.create( + response = await async_client.agents.api_keys.with_raw_response.create( path_agent_uuid="agent_uuid", ) @@ -332,7 +332,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.api_keys.with_streaming_response.create( + async with async_client.agents.api_keys.with_streaming_response.create( path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -347,14 +347,14 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.doagents.api_keys.with_raw_response.create( + await async_client.agents.api_keys.with_raw_response.create( path_agent_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> 
None: - api_key = await async_client.doagents.api_keys.update( + api_key = await async_client.agents.api_keys.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) @@ -363,7 +363,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.doagents.api_keys.update( + api_key = await async_client.agents.api_keys.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", @@ -375,7 +375,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.api_keys.with_raw_response.update( + response = await async_client.agents.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) @@ -388,7 +388,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.api_keys.with_streaming_response.update( + async with async_client.agents.api_keys.with_streaming_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="agent_uuid", ) as response: @@ -404,13 +404,13 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await async_client.doagents.api_keys.with_raw_response.update( + await async_client.agents.api_keys.with_raw_response.update( path_api_key_uuid="api_key_uuid", path_agent_uuid="", ) with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): - await async_client.doagents.api_keys.with_raw_response.update( + await async_client.agents.api_keys.with_raw_response.update( path_api_key_uuid="", path_agent_uuid="agent_uuid", ) @@ -418,7 +418,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.doagents.api_keys.list( + api_key = await async_client.agents.api_keys.list( agent_uuid="agent_uuid", ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @@ -426,7 +426,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.doagents.api_keys.list( + api_key = await async_client.agents.api_keys.list( agent_uuid="agent_uuid", page=0, per_page=0, @@ -436,7 +436,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.api_keys.with_raw_response.list( + response = await async_client.agents.api_keys.with_raw_response.list( agent_uuid="agent_uuid", ) @@ -448,7 +448,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.api_keys.with_streaming_response.list( + async with async_client.agents.api_keys.with_streaming_response.list( agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -463,14 +463,14 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @parametrize 
async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.doagents.api_keys.with_raw_response.list( + await async_client.agents.api_keys.with_raw_response.list( agent_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.doagents.api_keys.delete( + api_key = await async_client.agents.api_keys.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -479,7 +479,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.api_keys.with_raw_response.delete( + response = await async_client.agents.api_keys.with_raw_response.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -492,7 +492,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.api_keys.with_streaming_response.delete( + async with async_client.agents.api_keys.with_streaming_response.delete( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) as response: @@ -508,13 +508,13 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.doagents.api_keys.with_raw_response.delete( + await async_client.agents.api_keys.with_raw_response.delete( api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for 
`api_key_uuid` but received ''"): - await async_client.doagents.api_keys.with_raw_response.delete( + await async_client.agents.api_keys.with_raw_response.delete( api_key_uuid="", agent_uuid="agent_uuid", ) @@ -522,7 +522,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_regenerate(self, async_client: AsyncGradientAI) -> None: - api_key = await async_client.doagents.api_keys.regenerate( + api_key = await async_client.agents.api_keys.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -531,7 +531,7 @@ async def test_method_regenerate(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.api_keys.with_raw_response.regenerate( + response = await async_client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) @@ -544,7 +544,7 @@ async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.api_keys.with_streaming_response.regenerate( + async with async_client.agents.api_keys.with_streaming_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="agent_uuid", ) as response: @@ -560,13 +560,13 @@ async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI @parametrize async def test_path_params_regenerate(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.doagents.api_keys.with_raw_response.regenerate( + await async_client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, 
match=r"Expected a non-empty value for `api_key_uuid` but received ''"): - await async_client.doagents.api_keys.with_raw_response.regenerate( + await async_client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="", agent_uuid="agent_uuid", ) diff --git a/tests/api_resources/doagents/test_child_agents.py b/tests/api_resources/agents/test_child_agents.py similarity index 84% rename from tests/api_resources/doagents/test_child_agents.py rename to tests/api_resources/agents/test_child_agents.py index 8e0eb0a0..14af3b93 100644 --- a/tests/api_resources/doagents/test_child_agents.py +++ b/tests/api_resources/agents/test_child_agents.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.doagents import ( +from gradientai.types.agents import ( ChildAgentAddResponse, ChildAgentViewResponse, ChildAgentDeleteResponse, @@ -25,7 +25,7 @@ class TestChildAgents: @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - child_agent = client.doagents.child_agents.update( + child_agent = client.agents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -34,7 +34,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - child_agent = client.doagents.child_agents.update( + child_agent = client.agents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -48,7 +48,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.doagents.child_agents.with_raw_response.update( + response = client.agents.child_agents.with_raw_response.update( 
path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -61,7 +61,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.doagents.child_agents.with_streaming_response.update( + with client.agents.child_agents.with_streaming_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -79,13 +79,13 @@ def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - client.doagents.child_agents.with_raw_response.update( + client.agents.child_agents.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - client.doagents.child_agents.with_raw_response.update( + client.agents.child_agents.with_raw_response.update( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -93,7 +93,7 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - child_agent = client.doagents.child_agents.delete( + child_agent = client.agents.child_agents.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -102,7 +102,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.doagents.child_agents.with_raw_response.delete( + response = client.agents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -115,7 +115,7 @@ def test_raw_response_delete(self, client: 
GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.doagents.child_agents.with_streaming_response.delete( + with client.agents.child_agents.with_streaming_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) as response: @@ -131,13 +131,13 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): - client.doagents.child_agents.with_raw_response.delete( + client.agents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): - client.doagents.child_agents.with_raw_response.delete( + client.agents.child_agents.with_raw_response.delete( child_agent_uuid="", parent_agent_uuid="parent_agent_uuid", ) @@ -145,7 +145,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_add(self, client: GradientAI) -> None: - child_agent = client.doagents.child_agents.add( + child_agent = client.agents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -154,7 +154,7 @@ def test_method_add(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_add_with_all_params(self, client: GradientAI) -> None: - child_agent = client.doagents.child_agents.add( + child_agent = client.agents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -167,7 +167,7 @@ def test_method_add_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_add(self, 
client: GradientAI) -> None: - response = client.doagents.child_agents.with_raw_response.add( + response = client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -180,7 +180,7 @@ def test_raw_response_add(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_add(self, client: GradientAI) -> None: - with client.doagents.child_agents.with_streaming_response.add( + with client.agents.child_agents.with_streaming_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -198,13 +198,13 @@ def test_path_params_add(self, client: GradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - client.doagents.child_agents.with_raw_response.add( + client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - client.doagents.child_agents.with_raw_response.add( + client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -212,7 +212,7 @@ def test_path_params_add(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_view(self, client: GradientAI) -> None: - child_agent = client.doagents.child_agents.view( + child_agent = client.agents.child_agents.view( "uuid", ) assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) @@ -220,7 +220,7 @@ def test_method_view(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_view(self, client: GradientAI) -> None: - response = client.doagents.child_agents.with_raw_response.view( + response = client.agents.child_agents.with_raw_response.view( "uuid", ) @@ -232,7 
+232,7 @@ def test_raw_response_view(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_view(self, client: GradientAI) -> None: - with client.doagents.child_agents.with_streaming_response.view( + with client.agents.child_agents.with_streaming_response.view( "uuid", ) as response: assert not response.is_closed @@ -247,7 +247,7 @@ def test_streaming_response_view(self, client: GradientAI) -> None: @parametrize def test_path_params_view(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.doagents.child_agents.with_raw_response.view( + client.agents.child_agents.with_raw_response.view( "", ) @@ -258,7 +258,7 @@ class TestAsyncChildAgents: @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.doagents.child_agents.update( + child_agent = await async_client.agents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -267,7 +267,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.doagents.child_agents.update( + child_agent = await async_client.agents.child_agents.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -281,7 +281,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.child_agents.with_raw_response.update( + response = await async_client.agents.child_agents.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", 
path_parent_agent_uuid="parent_agent_uuid", ) @@ -294,7 +294,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.child_agents.with_streaming_response.update( + async with async_client.agents.child_agents.with_streaming_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -312,13 +312,13 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - await async_client.doagents.child_agents.with_raw_response.update( + await async_client.agents.child_agents.with_raw_response.update( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - await async_client.doagents.child_agents.with_raw_response.update( + await async_client.agents.child_agents.with_raw_response.update( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -326,7 +326,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.doagents.child_agents.delete( + child_agent = await async_client.agents.child_agents.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -335,7 +335,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.child_agents.with_raw_response.delete( + response = await 
async_client.agents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) @@ -348,7 +348,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.child_agents.with_streaming_response.delete( + async with async_client.agents.child_agents.with_streaming_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="parent_agent_uuid", ) as response: @@ -364,13 +364,13 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): - await async_client.doagents.child_agents.with_raw_response.delete( + await async_client.agents.child_agents.with_raw_response.delete( child_agent_uuid="child_agent_uuid", parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): - await async_client.doagents.child_agents.with_raw_response.delete( + await async_client.agents.child_agents.with_raw_response.delete( child_agent_uuid="", parent_agent_uuid="parent_agent_uuid", ) @@ -378,7 +378,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_add(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.doagents.child_agents.add( + child_agent = await async_client.agents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -387,7 +387,7 @@ async def test_method_add(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def 
test_method_add_with_all_params(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.doagents.child_agents.add( + child_agent = await async_client.agents.child_agents.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", body_child_agent_uuid="child_agent_uuid", @@ -400,7 +400,7 @@ async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.child_agents.with_raw_response.add( + response = await async_client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) @@ -413,7 +413,7 @@ async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.child_agents.with_streaming_response.add( + async with async_client.agents.child_agents.with_streaming_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="parent_agent_uuid", ) as response: @@ -431,13 +431,13 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: with pytest.raises( ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): - await async_client.doagents.child_agents.with_raw_response.add( + await async_client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): - await async_client.doagents.child_agents.with_raw_response.add( + await async_client.agents.child_agents.with_raw_response.add( path_child_agent_uuid="", path_parent_agent_uuid="parent_agent_uuid", ) @@ -445,7 +445,7 @@ 
async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_view(self, async_client: AsyncGradientAI) -> None: - child_agent = await async_client.doagents.child_agents.view( + child_agent = await async_client.agents.child_agents.view( "uuid", ) assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"]) @@ -453,7 +453,7 @@ async def test_method_view(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_view(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.child_agents.with_raw_response.view( + response = await async_client.agents.child_agents.with_raw_response.view( "uuid", ) @@ -465,7 +465,7 @@ async def test_raw_response_view(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_view(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.child_agents.with_streaming_response.view( + async with async_client.agents.child_agents.with_streaming_response.view( "uuid", ) as response: assert not response.is_closed @@ -480,6 +480,6 @@ async def test_streaming_response_view(self, async_client: AsyncGradientAI) -> N @parametrize async def test_path_params_view(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.doagents.child_agents.with_raw_response.view( + await async_client.agents.child_agents.with_raw_response.view( "", ) diff --git a/tests/api_resources/doagents/test_functions.py b/tests/api_resources/agents/test_functions.py similarity index 85% rename from tests/api_resources/doagents/test_functions.py rename to tests/api_resources/agents/test_functions.py index 11c76719..bfb05fa6 100644 --- a/tests/api_resources/doagents/test_functions.py +++ b/tests/api_resources/agents/test_functions.py @@ -9,7 
+9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.doagents import ( +from gradientai.types.agents import ( FunctionCreateResponse, FunctionDeleteResponse, FunctionUpdateResponse, @@ -24,7 +24,7 @@ class TestFunctions: @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - function = client.doagents.functions.create( + function = client.agents.functions.create( path_agent_uuid="agent_uuid", ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) @@ -32,7 +32,7 @@ def test_method_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - function = client.doagents.functions.create( + function = client.agents.functions.create( path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", description="description", @@ -47,7 +47,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.doagents.functions.with_raw_response.create( + response = client.agents.functions.with_raw_response.create( path_agent_uuid="agent_uuid", ) @@ -59,7 +59,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.doagents.functions.with_streaming_response.create( + with client.agents.functions.with_streaming_response.create( path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -74,14 +74,14 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @parametrize def test_path_params_create(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - 
client.doagents.functions.with_raw_response.create( + client.agents.functions.with_raw_response.create( path_agent_uuid="", ) @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - function = client.doagents.functions.update( + function = client.agents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ -90,7 +90,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - function = client.doagents.functions.update( + function = client.agents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", @@ -107,7 +107,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.doagents.functions.with_raw_response.update( + response = client.agents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ -120,7 +120,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.doagents.functions.with_streaming_response.update( + with client.agents.functions.with_streaming_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) as response: @@ -136,13 +136,13 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - client.doagents.functions.with_raw_response.update( + client.agents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="", ) with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): - client.doagents.functions.with_raw_response.update( + client.agents.functions.with_raw_response.update( path_function_uuid="", path_agent_uuid="agent_uuid", ) @@ -150,7 +150,7 @@ def test_path_params_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - function = client.doagents.functions.delete( + function = client.agents.functions.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -159,7 +159,7 @@ def test_method_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.doagents.functions.with_raw_response.delete( + response = client.agents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -172,7 +172,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.doagents.functions.with_streaming_response.delete( + with client.agents.functions.with_streaming_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) as response: @@ -188,13 +188,13 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.doagents.functions.with_raw_response.delete( + client.agents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): - client.doagents.functions.with_raw_response.delete( + client.agents.functions.with_raw_response.delete( function_uuid="", 
agent_uuid="agent_uuid", ) @@ -206,7 +206,7 @@ class TestAsyncFunctions: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - function = await async_client.doagents.functions.create( + function = await async_client.agents.functions.create( path_agent_uuid="agent_uuid", ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) @@ -214,7 +214,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - function = await async_client.doagents.functions.create( + function = await async_client.agents.functions.create( path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", description="description", @@ -229,7 +229,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.functions.with_raw_response.create( + response = await async_client.agents.functions.with_raw_response.create( path_agent_uuid="agent_uuid", ) @@ -241,7 +241,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.functions.with_streaming_response.create( + async with async_client.agents.functions.with_streaming_response.create( path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed @@ -256,14 +256,14 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): - await 
async_client.doagents.functions.with_raw_response.create( + await async_client.agents.functions.with_raw_response.create( path_agent_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - function = await async_client.doagents.functions.update( + function = await async_client.agents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ -272,7 +272,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - function = await async_client.doagents.functions.update( + function = await async_client.agents.functions.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", body_agent_uuid="agent_uuid", @@ -289,7 +289,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.functions.with_raw_response.update( + response = await async_client.agents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) @@ -302,7 +302,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.functions.with_streaming_response.update( + async with async_client.agents.functions.with_streaming_response.update( path_function_uuid="function_uuid", path_agent_uuid="agent_uuid", ) as response: @@ -318,13 +318,13 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected 
a non-empty value for `path_agent_uuid` but received ''"): - await async_client.doagents.functions.with_raw_response.update( + await async_client.agents.functions.with_raw_response.update( path_function_uuid="function_uuid", path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): - await async_client.doagents.functions.with_raw_response.update( + await async_client.agents.functions.with_raw_response.update( path_function_uuid="", path_agent_uuid="agent_uuid", ) @@ -332,7 +332,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - function = await async_client.doagents.functions.delete( + function = await async_client.agents.functions.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -341,7 +341,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.functions.with_raw_response.delete( + response = await async_client.agents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) @@ -354,7 +354,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.functions.with_streaming_response.delete( + async with async_client.agents.functions.with_streaming_response.delete( function_uuid="function_uuid", agent_uuid="agent_uuid", ) as response: @@ -370,13 +370,13 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.doagents.functions.with_raw_response.delete( + await async_client.agents.functions.with_raw_response.delete( function_uuid="function_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): - await async_client.doagents.functions.with_raw_response.delete( + await async_client.agents.functions.with_raw_response.delete( function_uuid="", agent_uuid="agent_uuid", ) diff --git a/tests/api_resources/doagents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py similarity index 82% rename from tests/api_resources/doagents/test_knowledge_bases.py rename to tests/api_resources/agents/test_knowledge_bases.py index f077caaa..dff80a9a 100644 --- a/tests/api_resources/doagents/test_knowledge_bases.py +++ b/tests/api_resources/agents/test_knowledge_bases.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.doagents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse +from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -20,7 +20,7 @@ class TestKnowledgeBases: @pytest.mark.skip() @parametrize def test_method_attach(self, client: GradientAI) -> None: - knowledge_base = client.doagents.knowledge_bases.attach( + knowledge_base = client.agents.knowledge_bases.attach( "agent_uuid", ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) @@ -28,7 +28,7 @@ def test_method_attach(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_attach(self, client: GradientAI) -> None: - response = client.doagents.knowledge_bases.with_raw_response.attach( + response = 
client.agents.knowledge_bases.with_raw_response.attach( "agent_uuid", ) @@ -40,7 +40,7 @@ def test_raw_response_attach(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_attach(self, client: GradientAI) -> None: - with client.doagents.knowledge_bases.with_streaming_response.attach( + with client.agents.knowledge_bases.with_streaming_response.attach( "agent_uuid", ) as response: assert not response.is_closed @@ -55,14 +55,14 @@ def test_streaming_response_attach(self, client: GradientAI) -> None: @parametrize def test_path_params_attach(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.doagents.knowledge_bases.with_raw_response.attach( + client.agents.knowledge_bases.with_raw_response.attach( "", ) @pytest.mark.skip() @parametrize def test_method_attach_single(self, client: GradientAI) -> None: - knowledge_base = client.doagents.knowledge_bases.attach_single( + knowledge_base = client.agents.knowledge_bases.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -71,7 +71,7 @@ def test_method_attach_single(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_attach_single(self, client: GradientAI) -> None: - response = client.doagents.knowledge_bases.with_raw_response.attach_single( + response = client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -84,7 +84,7 @@ def test_raw_response_attach_single(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_attach_single(self, client: GradientAI) -> None: - with client.doagents.knowledge_bases.with_streaming_response.attach_single( + with client.agents.knowledge_bases.with_streaming_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ 
-100,13 +100,13 @@ def test_streaming_response_attach_single(self, client: GradientAI) -> None: @parametrize def test_path_params_attach_single(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.doagents.knowledge_bases.with_raw_response.attach_single( + client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - client.doagents.knowledge_bases.with_raw_response.attach_single( + client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="", agent_uuid="agent_uuid", ) @@ -114,7 +114,7 @@ def test_path_params_attach_single(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_detach(self, client: GradientAI) -> None: - knowledge_base = client.doagents.knowledge_bases.detach( + knowledge_base = client.agents.knowledge_bases.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -123,7 +123,7 @@ def test_method_detach(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_detach(self, client: GradientAI) -> None: - response = client.doagents.knowledge_bases.with_raw_response.detach( + response = client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -136,7 +136,7 @@ def test_raw_response_detach(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_detach(self, client: GradientAI) -> None: - with client.doagents.knowledge_bases.with_streaming_response.detach( + with client.agents.knowledge_bases.with_streaming_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -152,13 +152,13 @@ def test_streaming_response_detach(self, client: 
GradientAI) -> None: @parametrize def test_path_params_detach(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - client.doagents.knowledge_bases.with_raw_response.detach( + client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - client.doagents.knowledge_bases.with_raw_response.detach( + client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="", agent_uuid="agent_uuid", ) @@ -170,7 +170,7 @@ class TestAsyncKnowledgeBases: @pytest.mark.skip() @parametrize async def test_method_attach(self, async_client: AsyncGradientAI) -> None: - knowledge_base = await async_client.doagents.knowledge_bases.attach( + knowledge_base = await async_client.agents.knowledge_bases.attach( "agent_uuid", ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) @@ -178,7 +178,7 @@ async def test_method_attach(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_attach(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.knowledge_bases.with_raw_response.attach( + response = await async_client.agents.knowledge_bases.with_raw_response.attach( "agent_uuid", ) @@ -190,7 +190,7 @@ async def test_raw_response_attach(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_attach(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.knowledge_bases.with_streaming_response.attach( + async with async_client.agents.knowledge_bases.with_streaming_response.attach( "agent_uuid", ) as response: assert not response.is_closed @@ -205,14 +205,14 @@ async def test_streaming_response_attach(self, async_client: AsyncGradientAI) -> 
@parametrize async def test_path_params_attach(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.doagents.knowledge_bases.with_raw_response.attach( + await async_client.agents.knowledge_bases.with_raw_response.attach( "", ) @pytest.mark.skip() @parametrize async def test_method_attach_single(self, async_client: AsyncGradientAI) -> None: - knowledge_base = await async_client.doagents.knowledge_bases.attach_single( + knowledge_base = await async_client.agents.knowledge_bases.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -221,7 +221,7 @@ async def test_method_attach_single(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.knowledge_bases.with_raw_response.attach_single( + response = await async_client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -234,7 +234,7 @@ async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize async def test_streaming_response_attach_single(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.knowledge_bases.with_streaming_response.attach_single( + async with async_client.agents.knowledge_bases.with_streaming_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -250,13 +250,13 @@ async def test_streaming_response_attach_single(self, async_client: AsyncGradien @parametrize async def test_path_params_attach_single(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await 
async_client.doagents.knowledge_bases.with_raw_response.attach_single( + await async_client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - await async_client.doagents.knowledge_bases.with_raw_response.attach_single( + await async_client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="", agent_uuid="agent_uuid", ) @@ -264,7 +264,7 @@ async def test_path_params_attach_single(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_method_detach(self, async_client: AsyncGradientAI) -> None: - knowledge_base = await async_client.doagents.knowledge_bases.detach( + knowledge_base = await async_client.agents.knowledge_bases.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -273,7 +273,7 @@ async def test_method_detach(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.knowledge_bases.with_raw_response.detach( + response = await async_client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) @@ -286,7 +286,7 @@ async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_detach(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.knowledge_bases.with_streaming_response.detach( + async with async_client.agents.knowledge_bases.with_streaming_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="agent_uuid", ) as response: @@ -302,13 +302,13 @@ async def test_streaming_response_detach(self, async_client: AsyncGradientAI) -> @parametrize async def 
test_path_params_detach(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): - await async_client.doagents.knowledge_bases.with_raw_response.detach( + await async_client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): - await async_client.doagents.knowledge_bases.with_raw_response.detach( + await async_client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="", agent_uuid="agent_uuid", ) diff --git a/tests/api_resources/doagents/test_versions.py b/tests/api_resources/agents/test_versions.py similarity index 84% rename from tests/api_resources/doagents/test_versions.py rename to tests/api_resources/agents/test_versions.py index ec5e293d..77fee4c6 100644 --- a/tests/api_resources/doagents/test_versions.py +++ b/tests/api_resources/agents/test_versions.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.doagents import ( +from gradientai.types.agents import ( VersionListResponse, VersionUpdateResponse, ) @@ -23,7 +23,7 @@ class TestVersions: @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - version = client.doagents.versions.update( + version = client.agents.versions.update( path_uuid="uuid", ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -31,7 +31,7 @@ def test_method_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - version = client.doagents.versions.update( + version = client.agents.versions.update( path_uuid="uuid", body_uuid="uuid", version_hash="version_hash", @@ -41,7 +41,7 @@ def test_method_update_with_all_params(self, 
client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.doagents.versions.with_raw_response.update( + response = client.agents.versions.with_raw_response.update( path_uuid="uuid", ) @@ -53,7 +53,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.doagents.versions.with_streaming_response.update( + with client.agents.versions.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed @@ -68,14 +68,14 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.doagents.versions.with_raw_response.update( + client.agents.versions.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - version = client.doagents.versions.list( + version = client.agents.versions.list( uuid="uuid", ) assert_matches_type(VersionListResponse, version, path=["response"]) @@ -83,7 +83,7 @@ def test_method_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - version = client.doagents.versions.list( + version = client.agents.versions.list( uuid="uuid", page=0, per_page=0, @@ -93,7 +93,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.doagents.versions.with_raw_response.list( + response = client.agents.versions.with_raw_response.list( uuid="uuid", ) @@ -105,7 +105,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: 
@pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.doagents.versions.with_streaming_response.list( + with client.agents.versions.with_streaming_response.list( uuid="uuid", ) as response: assert not response.is_closed @@ -120,7 +120,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @parametrize def test_path_params_list(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.doagents.versions.with_raw_response.list( + client.agents.versions.with_raw_response.list( uuid="", ) @@ -131,7 +131,7 @@ class TestAsyncVersions: @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - version = await async_client.doagents.versions.update( + version = await async_client.agents.versions.update( path_uuid="uuid", ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -139,7 +139,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - version = await async_client.doagents.versions.update( + version = await async_client.agents.versions.update( path_uuid="uuid", body_uuid="uuid", version_hash="version_hash", @@ -149,7 +149,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.versions.with_raw_response.update( + response = await async_client.agents.versions.with_raw_response.update( path_uuid="uuid", ) @@ -161,7 +161,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) 
-> None: - async with async_client.doagents.versions.with_streaming_response.update( + async with async_client.agents.versions.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed @@ -176,14 +176,14 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.doagents.versions.with_raw_response.update( + await async_client.agents.versions.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - version = await async_client.doagents.versions.list( + version = await async_client.agents.versions.list( uuid="uuid", ) assert_matches_type(VersionListResponse, version, path=["response"]) @@ -191,7 +191,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - version = await async_client.doagents.versions.list( + version = await async_client.agents.versions.list( uuid="uuid", page=0, per_page=0, @@ -201,7 +201,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.versions.with_raw_response.list( + response = await async_client.agents.versions.with_raw_response.list( uuid="uuid", ) @@ -213,7 +213,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.versions.with_streaming_response.list( + async with 
async_client.agents.versions.with_streaming_response.list( uuid="uuid", ) as response: assert not response.is_closed @@ -228,6 +228,6 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @parametrize async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.doagents.versions.with_raw_response.list( + await async_client.agents.versions.with_raw_response.list( uuid="", ) diff --git a/tests/api_resources/test_doagents.py b/tests/api_resources/test_agents.py similarity index 66% rename from tests/api_resources/test_doagents.py rename to tests/api_resources/test_agents.py index 9a8c5c91..f39ac4d5 100644 --- a/tests/api_resources/test_doagents.py +++ b/tests/api_resources/test_agents.py @@ -10,30 +10,30 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type from gradientai.types import ( - DoagentListResponse, - DoagentCreateResponse, - DoagentDeleteResponse, - DoagentUpdateResponse, - DoagentRetrieveResponse, - DoagentUpdateStatusResponse, + AgentListResponse, + AgentCreateResponse, + AgentDeleteResponse, + AgentUpdateResponse, + AgentRetrieveResponse, + AgentUpdateStatusResponse, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") -class TestDoagents: +class TestAgents: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - doagent = client.doagents.create() - assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) + agent = client.agents.create() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - doagent = client.doagents.create( + agent = client.agents.create( 
anthropic_key_uuid="anthropic_key_uuid", description="description", instruction="instruction", @@ -45,61 +45,61 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: region="region", tags=["string"], ) - assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) + assert_matches_type(AgentCreateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.doagents.with_raw_response.create() + response = client.agents.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.doagents.with_streaming_response.create() as response: + with client.agents.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize def test_method_retrieve(self, client: GradientAI) -> None: - doagent = client.doagents.retrieve( + agent = client.agents.retrieve( "uuid", ) - assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.doagents.with_raw_response.retrieve( + response = client.agents.with_raw_response.retrieve( 
"uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.doagents.with_streaming_response.retrieve( + with client.agents.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -107,22 +107,22 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @parametrize def test_path_params_retrieve(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.doagents.with_raw_response.retrieve( + client.agents.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize def test_method_update(self, client: GradientAI) -> None: - doagent = client.doagents.update( + agent = client.agents.update( path_uuid="uuid", ) - assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: - doagent = client.doagents.update( + agent = client.agents.update( path_uuid="uuid", anthropic_key_uuid="anthropic_key_uuid", description="description", @@ -140,31 +140,31 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: top_p=0, body_uuid="uuid", ) - 
assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_update(self, client: GradientAI) -> None: - response = client.doagents.with_raw_response.update( + response = client.agents.with_raw_response.update( path_uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: - with client.doagents.with_streaming_response.update( + with client.agents.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -172,79 +172,79 @@ def test_streaming_response_update(self, client: GradientAI) -> None: @parametrize def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.doagents.with_raw_response.update( + client.agents.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - doagent = client.doagents.list() - assert_matches_type(DoagentListResponse, doagent, path=["response"]) + agent = client.agents.list() + assert_matches_type(AgentListResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, 
client: GradientAI) -> None: - doagent = client.doagents.list( + agent = client.agents.list( only_deployed=True, page=0, per_page=0, ) - assert_matches_type(DoagentListResponse, doagent, path=["response"]) + assert_matches_type(AgentListResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.doagents.with_raw_response.list() + response = client.agents.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentListResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.doagents.with_streaming_response.list() as response: + with client.agents.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentListResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: - doagent = client.doagents.delete( + agent = client.agents.delete( "uuid", ) - assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.doagents.with_raw_response.delete( + response = client.agents.with_raw_response.delete( "uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = 
response.parse() - assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.doagents.with_streaming_response.delete( + with client.agents.with_streaming_response.delete( "uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -252,51 +252,51 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: @parametrize def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.doagents.with_raw_response.delete( + client.agents.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize def test_method_update_status(self, client: GradientAI) -> None: - doagent = client.doagents.update_status( + agent = client.agents.update_status( path_uuid="uuid", ) - assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_method_update_status_with_all_params(self, client: GradientAI) -> None: - doagent = client.doagents.update_status( + agent = client.agents.update_status( path_uuid="uuid", body_uuid="uuid", visibility="VISIBILITY_UNKNOWN", ) - assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_raw_response_update_status(self, client: GradientAI) -> 
None: - response = client.doagents.with_raw_response.update_status( + response = client.agents.with_raw_response.update_status( path_uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize def test_streaming_response_update_status(self, client: GradientAI) -> None: - with client.doagents.with_streaming_response.update_status( + with client.agents.with_streaming_response.update_status( path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = response.parse() - assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) + agent = response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -304,24 +304,24 @@ def test_streaming_response_update_status(self, client: GradientAI) -> None: @parametrize def test_path_params_update_status(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.doagents.with_raw_response.update_status( + client.agents.with_raw_response.update_status( path_uuid="", ) -class TestAsyncDoagents: +class TestAsyncAgents: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.create() - assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) + agent = await async_client.agents.create() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) 
@pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.create( + agent = await async_client.agents.create( anthropic_key_uuid="anthropic_key_uuid", description="description", instruction="instruction", @@ -333,61 +333,61 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI region="region", tags=["string"], ) - assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) + assert_matches_type(AgentCreateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.with_raw_response.create() + response = await async_client.agents.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.with_streaming_response.create() as response: + async with async_client.agents.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentCreateResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentCreateResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.retrieve( + agent = await 
async_client.agents.retrieve( "uuid", ) - assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.with_raw_response.retrieve( + response = await async_client.agents.with_raw_response.retrieve( "uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.with_streaming_response.retrieve( + async with async_client.agents.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentRetrieveResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentRetrieveResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -395,22 +395,22 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.doagents.with_raw_response.retrieve( + await async_client.agents.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.update( + agent = await 
async_client.agents.update( path_uuid="uuid", ) - assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.update( + agent = await async_client.agents.update( path_uuid="uuid", anthropic_key_uuid="anthropic_key_uuid", description="description", @@ -428,31 +428,31 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI top_p=0, body_uuid="uuid", ) - assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.with_raw_response.update( + response = await async_client.agents.with_raw_response.update( path_uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.with_streaming_response.update( + async with async_client.agents.with_streaming_response.update( path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentUpdateResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentUpdateResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @@ 
-460,79 +460,79 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> @parametrize async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.doagents.with_raw_response.update( + await async_client.agents.with_raw_response.update( path_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.list() - assert_matches_type(DoagentListResponse, doagent, path=["response"]) + agent = await async_client.agents.list() + assert_matches_type(AgentListResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.list( + agent = await async_client.agents.list( only_deployed=True, page=0, per_page=0, ) - assert_matches_type(DoagentListResponse, doagent, path=["response"]) + assert_matches_type(AgentListResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.with_raw_response.list() + response = await async_client.agents.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentListResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.with_streaming_response.list() as response: + async with async_client.agents.with_streaming_response.list() as response: assert not 
response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentListResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentListResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.delete( + agent = await async_client.agents.delete( "uuid", ) - assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.with_raw_response.delete( + response = await async_client.agents.with_raw_response.delete( "uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.with_streaming_response.delete( + async with async_client.agents.with_streaming_response.delete( "uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentDeleteResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentDeleteResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -540,51 +540,51 @@ async def test_streaming_response_delete(self, async_client: 
AsyncGradientAI) -> @parametrize async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.doagents.with_raw_response.delete( + await async_client.agents.with_raw_response.delete( "", ) @pytest.mark.skip() @parametrize async def test_method_update_status(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.update_status( + agent = await async_client.agents.update_status( path_uuid="uuid", ) - assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_update_status_with_all_params(self, async_client: AsyncGradientAI) -> None: - doagent = await async_client.doagents.update_status( + agent = await async_client.agents.update_status( path_uuid="uuid", body_uuid="uuid", visibility="VISIBILITY_UNKNOWN", ) - assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_raw_response_update_status(self, async_client: AsyncGradientAI) -> None: - response = await async_client.doagents.with_raw_response.update_status( + response = await async_client.agents.with_raw_response.update_status( path_uuid="uuid", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @pytest.mark.skip() @parametrize async def test_streaming_response_update_status(self, async_client: AsyncGradientAI) -> None: - async with async_client.doagents.with_streaming_response.update_status( 
+ async with async_client.agents.with_streaming_response.update_status( path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" - doagent = await response.parse() - assert_matches_type(DoagentUpdateStatusResponse, doagent, path=["response"]) + agent = await response.parse() + assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) assert cast(Any, response.is_closed) is True @@ -592,6 +592,6 @@ async def test_streaming_response_update_status(self, async_client: AsyncGradien @parametrize async def test_path_params_update_status(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.doagents.with_raw_response.update_status( + await async_client.agents.with_raw_response.update_status( path_uuid="", ) diff --git a/tests/test_client.py b/tests/test_client.py index 4a26cbd0..d83082e3 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -724,7 +724,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - client.doagents.versions.with_streaming_response.list(uuid="uuid").__enter__() + client.agents.versions.with_streaming_response.list(uuid="uuid").__enter__() assert _get_open_connections(self.client) == 0 @@ -734,7 +734,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - client.doagents.versions.with_streaming_response.list(uuid="uuid").__enter__() + client.agents.versions.with_streaming_response.list(uuid="uuid").__enter__() assert _get_open_connections(self.client) == 0 
@pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @@ -763,7 +763,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = client.doagents.versions.with_raw_response.list(uuid="uuid") + response = client.agents.versions.with_raw_response.list(uuid="uuid") assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @@ -787,7 +787,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = client.doagents.versions.with_raw_response.list( + response = client.agents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": Omit()} ) @@ -812,7 +812,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = client.doagents.versions.with_raw_response.list( + response = client.agents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": "42"} ) @@ -1544,7 +1544,7 @@ async def test_retrying_timeout_errors_doesnt_leak( respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): - await async_client.doagents.versions.with_streaming_response.list(uuid="uuid").__aenter__() + await async_client.agents.versions.with_streaming_response.list(uuid="uuid").__aenter__() assert _get_open_connections(self.client) == 0 @@ -1556,7 +1556,7 @@ async def test_retrying_status_errors_doesnt_leak( respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): - await async_client.doagents.versions.with_streaming_response.list(uuid="uuid").__aenter__() + await 
async_client.agents.versions.with_streaming_response.list(uuid="uuid").__aenter__() assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @@ -1586,7 +1586,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = await client.doagents.versions.with_raw_response.list(uuid="uuid") + response = await client.agents.versions.with_raw_response.list(uuid="uuid") assert response.retries_taken == failures_before_success assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @@ -1611,7 +1611,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = await client.doagents.versions.with_raw_response.list( + response = await client.agents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": Omit()} ) @@ -1637,7 +1637,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=retry_handler) - response = await client.doagents.versions.with_raw_response.list( + response = await client.agents.versions.with_raw_response.list( uuid="uuid", extra_headers={"x-stainless-retry-count": "42"} ) From 10b79fb1d51bcff6ed0d18e5ccd18fd1cd75af9f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 21 Jun 2025 02:22:54 +0000 Subject: [PATCH 28/41] chore(internal): codegen related update --- .github/workflows/ci.yml | 6 +++--- .stats.yml | 2 +- scripts/utils/upload-artifact.sh | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 08bd7a02..6bfd00b1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,7 +16,7 @@ jobs: lint: 
timeout-minutes: 10 name: lint - runs-on: ${{ github.repository == 'stainless-sdks/digitalocean-genai-sdk-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'stainless-sdks/gradientai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 @@ -35,7 +35,7 @@ jobs: run: ./scripts/lint upload: - if: github.repository == 'stainless-sdks/digitalocean-genai-sdk-python' + if: github.repository == 'stainless-sdks/gradientai-python' timeout-minutes: 10 name: upload permissions: @@ -61,7 +61,7 @@ jobs: test: timeout-minutes: 10 name: test - runs-on: ${{ github.repository == 'stainless-sdks/digitalocean-genai-sdk-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'stainless-sdks/gradientai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 diff --git a/.stats.yml b/.stats.yml index 8f85d58c..c2144164 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fdigitalocean-genai-sdk-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 config_hash: a2c4c23eb1e8655fbfb2b6930ce0fd46 diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh index c1019559..eb717c71 100755 --- a/scripts/utils/upload-artifact.sh +++ b/scripts/utils/upload-artifact.sh @@ -18,7 +18,7 @@ UPLOAD_RESPONSE=$(tar -cz . 
| curl -v -X PUT \ if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then echo -e "\033[32mUploaded build to Stainless storage.\033[0m" - echo -e "\033[32mInstallation: pip install --pre 'https://pkg.stainless.com/s/digitalocean-genai-sdk-python/$SHA'\033[0m" + echo -e "\033[32mInstallation: pip install --pre 'https://pkg.stainless.com/s/gradientai-python/$SHA'\033[0m" else echo -e "\033[31mFailed to upload artifact.\033[0m" exit 1 From 494afde754f735d1ba95011fc83d23d2410fcfdd Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 21 Jun 2025 04:15:38 +0000 Subject: [PATCH 29/41] feat(client): add support for aiohttp --- README.md | 36 ++++++++++++++++ pyproject.toml | 2 + requirements-dev.lock | 27 ++++++++++++ requirements.lock | 27 ++++++++++++ src/gradientai/__init__.py | 3 +- src/gradientai/_base_client.py | 22 ++++++++++ tests/api_resources/agents/test_api_keys.py | 4 +- .../api_resources/agents/test_child_agents.py | 4 +- tests/api_resources/agents/test_functions.py | 4 +- .../agents/test_knowledge_bases.py | 4 +- tests/api_resources/agents/test_versions.py | 4 +- tests/api_resources/chat/test_completions.py | 4 +- .../api_resources/inference/test_api_keys.py | 4 +- tests/api_resources/inference/test_models.py | 4 +- .../knowledge_bases/test_data_sources.py | 4 +- .../providers/anthropic/test_keys.py | 4 +- .../providers/openai/test_keys.py | 4 +- tests/api_resources/test_agents.py | 4 +- tests/api_resources/test_indexing_jobs.py | 4 +- tests/api_resources/test_knowledge_bases.py | 4 +- tests/api_resources/test_models.py | 4 +- tests/api_resources/test_regions.py | 4 +- tests/conftest.py | 43 ++++++++++++++++--- 23 files changed, 201 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md index 36edcfbd..efae1613 100644 --- a/README.md +++ b/README.md @@ -72,6 +72,42 @@ asyncio.run(main()) Functionality between the synchronous and asynchronous clients is otherwise identical. 
+### With aiohttp + +By default, the async client uses `httpx` for HTTP requests. However, for improved concurrency performance you may also use `aiohttp` as the HTTP backend. + +You can enable this by installing `aiohttp`: + +```sh +# install from PyPI +pip install --pre c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python[aiohttp] +``` + +Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`: + +```python +import os +import asyncio +from gradientai import DefaultAioHttpClient +from gradientai import AsyncGradientAI + + +async def main() -> None: + async with AsyncGradientAI( + api_key=os.environ.get( + "DIGITALOCEAN_GENAI_SDK_API_KEY" + ), # This is the default and can be omitted + http_client=DefaultAioHttpClient(), + ) as client: + versions = await client.agents.versions.list( + uuid="REPLACE_ME", + ) + print(versions.agent_versions) + + +asyncio.run(main()) +``` + ## Using types Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev) which also provide helper methods for things like: diff --git a/pyproject.toml b/pyproject.toml index 8f36a952..22cad738 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,6 +37,8 @@ classifiers = [ Homepage = "https://github.com/digitalocean/genai-python" Repository = "https://github.com/digitalocean/genai-python" +[project.optional-dependencies] +aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.6"] [tool.rye] managed = true diff --git a/requirements-dev.lock b/requirements-dev.lock index 1e074a56..85b6a829 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -10,6 +10,13 @@ # universal: false -e file:. 
+aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.12.8 + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via httpx-aiohttp +aiosignal==1.3.2 + # via aiohttp annotated-types==0.6.0 # via pydantic anyio==4.4.0 @@ -17,6 +24,10 @@ anyio==4.4.0 # via httpx argcomplete==3.1.2 # via nox +async-timeout==5.0.1 + # via aiohttp +attrs==25.3.0 + # via aiohttp certifi==2023.7.22 # via httpcore # via httpx @@ -34,16 +45,23 @@ execnet==2.1.1 # via pytest-xdist filelock==3.12.4 # via virtualenv +frozenlist==1.6.2 + # via aiohttp + # via aiosignal h11==0.14.0 # via httpcore httpcore==1.0.2 # via httpx httpx==0.28.1 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via httpx-aiohttp # via respx +httpx-aiohttp==0.1.6 + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python idna==3.4 # via anyio # via httpx + # via yarl importlib-metadata==7.0.0 iniconfig==2.0.0 # via pytest @@ -51,6 +69,9 @@ markdown-it-py==3.0.0 # via rich mdurl==0.1.2 # via markdown-it-py +multidict==6.4.4 + # via aiohttp + # via yarl mypy==1.14.1 mypy-extensions==1.0.0 # via mypy @@ -65,6 +86,9 @@ platformdirs==3.11.0 # via virtualenv pluggy==1.5.0 # via pytest +propcache==0.3.1 + # via aiohttp + # via yarl pydantic==2.10.3 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python pydantic-core==2.27.1 @@ -98,11 +122,14 @@ tomli==2.0.2 typing-extensions==4.12.2 # via anyio # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via multidict # via mypy # via pydantic # via pydantic-core # via pyright virtualenv==20.24.5 # via nox +yarl==1.20.0 + # via aiohttp zipp==3.17.0 # via importlib-metadata diff --git a/requirements.lock b/requirements.lock index dab2f6ce..47944bd5 100644 --- a/requirements.lock +++ b/requirements.lock @@ -10,11 +10,22 @@ # universal: false -e file:. 
+aiohappyeyeballs==2.6.1 + # via aiohttp +aiohttp==3.12.8 + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via httpx-aiohttp +aiosignal==1.3.2 + # via aiohttp annotated-types==0.6.0 # via pydantic anyio==4.4.0 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python # via httpx +async-timeout==5.0.1 + # via aiohttp +attrs==25.3.0 + # via aiohttp certifi==2023.7.22 # via httpcore # via httpx @@ -22,15 +33,28 @@ distro==1.8.0 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python exceptiongroup==1.2.2 # via anyio +frozenlist==1.6.2 + # via aiohttp + # via aiosignal h11==0.14.0 # via httpcore httpcore==1.0.2 # via httpx httpx==0.28.1 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via httpx-aiohttp +httpx-aiohttp==0.1.6 + # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python idna==3.4 # via anyio # via httpx + # via yarl +multidict==6.4.4 + # via aiohttp + # via yarl +propcache==0.3.1 + # via aiohttp + # via yarl pydantic==2.10.3 # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python pydantic-core==2.27.1 @@ -41,5 +65,8 @@ sniffio==1.3.0 typing-extensions==4.12.2 # via anyio # via c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python + # via multidict # via pydantic # via pydantic-core +yarl==1.20.0 + # via aiohttp diff --git a/src/gradientai/__init__.py b/src/gradientai/__init__.py index e0f0a60b..3316fe47 100644 --- a/src/gradientai/__init__.py +++ b/src/gradientai/__init__.py @@ -36,7 +36,7 @@ UnprocessableEntityError, APIResponseValidationError, ) -from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient +from ._base_client import DefaultHttpxClient, DefaultAioHttpClient, DefaultAsyncHttpxClient from ._utils._logs import setup_logging as _setup_logging __all__ = [ @@ -78,6 +78,7 @@ "DEFAULT_CONNECTION_LIMITS", "DefaultHttpxClient", "DefaultAsyncHttpxClient", + "DefaultAioHttpClient", ] if not _t.TYPE_CHECKING: diff --git a/src/gradientai/_base_client.py b/src/gradientai/_base_client.py index aa3b35f1..6dce600b 100644 --- a/src/gradientai/_base_client.py +++ 
b/src/gradientai/_base_client.py @@ -1289,6 +1289,24 @@ def __init__(self, **kwargs: Any) -> None: super().__init__(**kwargs) +try: + import httpx_aiohttp +except ImportError: + + class _DefaultAioHttpClient(httpx.AsyncClient): + def __init__(self, **_kwargs: Any) -> None: + raise RuntimeError("To use the aiohttp client you must have installed the package with the `aiohttp` extra") +else: + + class _DefaultAioHttpClient(httpx_aiohttp.HttpxAiohttpClient): # type: ignore + def __init__(self, **kwargs: Any) -> None: + kwargs.setdefault("timeout", DEFAULT_TIMEOUT) + kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS) + kwargs.setdefault("follow_redirects", True) + + super().__init__(**kwargs) + + if TYPE_CHECKING: DefaultAsyncHttpxClient = httpx.AsyncClient """An alias to `httpx.AsyncClient` that provides the same defaults that this SDK @@ -1297,8 +1315,12 @@ def __init__(self, **kwargs: Any) -> None: This is useful because overriding the `http_client` with your own instance of `httpx.AsyncClient` will result in httpx's defaults being used, not ours. 
""" + + DefaultAioHttpClient = httpx.AsyncClient + """An alias to `httpx.AsyncClient` that changes the default HTTP transport to `aiohttp`.""" else: DefaultAsyncHttpxClient = _DefaultAsyncHttpxClient + DefaultAioHttpClient = _DefaultAioHttpClient class AsyncHttpxClientWrapper(DefaultAsyncHttpxClient): diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py index e8489258..beb9666a 100644 --- a/tests/api_resources/agents/test_api_keys.py +++ b/tests/api_resources/agents/test_api_keys.py @@ -297,7 +297,9 @@ def test_path_params_regenerate(self, client: GradientAI) -> None: class TestAsyncAPIKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/agents/test_child_agents.py b/tests/api_resources/agents/test_child_agents.py index 14af3b93..daa7b10e 100644 --- a/tests/api_resources/agents/test_child_agents.py +++ b/tests/api_resources/agents/test_child_agents.py @@ -253,7 +253,9 @@ def test_path_params_view(self, client: GradientAI) -> None: class TestAsyncChildAgents: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py index bfb05fa6..5a3693cb 100644 --- a/tests/api_resources/agents/test_functions.py +++ b/tests/api_resources/agents/test_functions.py @@ -201,7 +201,9 @@ def test_path_params_delete(self, client: GradientAI) -> None: class TestAsyncFunctions: - parametrize = 
pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py index dff80a9a..e62c05ff 100644 --- a/tests/api_resources/agents/test_knowledge_bases.py +++ b/tests/api_resources/agents/test_knowledge_bases.py @@ -165,7 +165,9 @@ def test_path_params_detach(self, client: GradientAI) -> None: class TestAsyncKnowledgeBases: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py index 77fee4c6..79f73672 100644 --- a/tests/api_resources/agents/test_versions.py +++ b/tests/api_resources/agents/test_versions.py @@ -126,7 +126,9 @@ def test_path_params_list(self, client: GradientAI) -> None: class TestAsyncVersions: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 17319d86..b4c09579 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -100,7 +100,9 @@ def test_streaming_response_create(self, client: GradientAI) -> None: class TestAsyncCompletions: - parametrize = 
pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/inference/test_api_keys.py b/tests/api_resources/inference/test_api_keys.py index d84572c7..90bf95b9 100644 --- a/tests/api_resources/inference/test_api_keys.py +++ b/tests/api_resources/inference/test_api_keys.py @@ -234,7 +234,9 @@ def test_path_params_update_regenerate(self, client: GradientAI) -> None: class TestAsyncAPIKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/inference/test_models.py b/tests/api_resources/inference/test_models.py index 936801cb..569345ed 100644 --- a/tests/api_resources/inference/test_models.py +++ b/tests/api_resources/inference/test_models.py @@ -89,7 +89,9 @@ def test_streaming_response_list(self, client: GradientAI) -> None: class TestAsyncModels: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index ce9c390e..9c466e2f 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -197,7 +197,9 @@ def test_path_params_delete(self, client: GradientAI) -> None: class TestAsyncDataSources: - 
parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/providers/anthropic/test_keys.py b/tests/api_resources/providers/anthropic/test_keys.py index fab973bf..86ec19f4 100644 --- a/tests/api_resources/providers/anthropic/test_keys.py +++ b/tests/api_resources/providers/anthropic/test_keys.py @@ -289,7 +289,9 @@ def test_path_params_list_agents(self, client: GradientAI) -> None: class TestAsyncKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/providers/openai/test_keys.py b/tests/api_resources/providers/openai/test_keys.py index 1bb270b1..ce5cb4f5 100644 --- a/tests/api_resources/providers/openai/test_keys.py +++ b/tests/api_resources/providers/openai/test_keys.py @@ -289,7 +289,9 @@ def test_path_params_retrieve_agents(self, client: GradientAI) -> None: class TestAsyncKeys: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index f39ac4d5..2cc0e080 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -310,7 +310,9 @@ def test_path_params_update_status(self, client: GradientAI) -> None: class TestAsyncAgents: - parametrize = 
pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_indexing_jobs.py b/tests/api_resources/test_indexing_jobs.py index d44a75ae..6a50d9b5 100644 --- a/tests/api_resources/test_indexing_jobs.py +++ b/tests/api_resources/test_indexing_jobs.py @@ -234,7 +234,9 @@ def test_path_params_update_cancel(self, client: GradientAI) -> None: class TestAsyncIndexingJobs: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index c9171644..508820ce 100644 --- a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -273,7 +273,9 @@ def test_path_params_delete(self, client: GradientAI) -> None: class TestAsyncKnowledgeBases: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index 946b2eb9..5e119f71 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -58,7 +58,9 @@ def test_streaming_response_list(self, client: GradientAI) -> None: class TestAsyncModels: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + 
parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py index 64c84612..8e25617f 100644 --- a/tests/api_resources/test_regions.py +++ b/tests/api_resources/test_regions.py @@ -56,7 +56,9 @@ def test_streaming_response_list(self, client: GradientAI) -> None: class TestAsyncRegions: - parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) @pytest.mark.skip() @parametrize diff --git a/tests/conftest.py b/tests/conftest.py index 8432d29e..23079a7e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -6,10 +6,12 @@ import logging from typing import TYPE_CHECKING, Iterator, AsyncIterator +import httpx import pytest from pytest_asyncio import is_async_test -from gradientai import GradientAI, AsyncGradientAI +from gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient +from gradientai._utils import is_dict if TYPE_CHECKING: from _pytest.fixtures import FixtureRequest # pyright: ignore[reportPrivateImportUsage] @@ -27,6 +29,19 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None: for async_test in pytest_asyncio_tests: async_test.add_marker(session_scope_marker, append=False) + # We skip tests that use both the aiohttp client and respx_mock as respx_mock + # doesn't support custom transports. 
+ for item in items: + if "async_client" not in item.fixturenames or "respx_mock" not in item.fixturenames: + continue + + if not hasattr(item, "callspec"): + continue + + async_client_param = item.callspec.params.get("async_client") + if is_dict(async_client_param) and async_client_param.get("http_client") == "aiohttp": + item.add_marker(pytest.mark.skip(reason="aiohttp client is not compatible with respx_mock")) + base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -45,9 +60,25 @@ def client(request: FixtureRequest) -> Iterator[GradientAI]: @pytest.fixture(scope="session") async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncGradientAI]: - strict = getattr(request, "param", True) - if not isinstance(strict, bool): - raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}") - - async with AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client: + param = getattr(request, "param", True) + + # defaults + strict = True + http_client: None | httpx.AsyncClient = None + + if isinstance(param, bool): + strict = param + elif is_dict(param): + strict = param.get("strict", True) + assert isinstance(strict, bool) + + http_client_type = param.get("http_client", "httpx") + if http_client_type == "aiohttp": + http_client = DefaultAioHttpClient() + else: + raise TypeError(f"Unexpected fixture parameter type {type(param)}, expected bool or dict") + + async with AsyncGradientAI( + base_url=base_url, api_key=api_key, _strict_response_validation=strict, http_client=http_client + ) as client: yield client From 359c8d88cec1d60f0beb810b5a0139443d0a3348 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 21 Jun 2025 15:29:45 +0000 Subject: [PATCH 30/41] feat(api): update via SDK Studio --- .stats.yml | 2 +- README.md | 8 ++++---- src/gradientai/_client.py | 12 ++++++------ tests/test_client.py | 4 ++-- 4 files 
changed, 13 insertions(+), 13 deletions(-) diff --git a/.stats.yml b/.stats.yml index c2144164..17f19856 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: a2c4c23eb1e8655fbfb2b6930ce0fd46 +config_hash: 48e21c88c078b1d478257b2da0c840b2 diff --git a/README.md b/README.md index efae1613..546252a1 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ from gradientai import GradientAI client = GradientAI( api_key=os.environ.get( - "DIGITALOCEAN_GENAI_SDK_API_KEY" + "DIGITALOCEAN_GRADIENTAI_API_KEY" ), # This is the default and can be omitted ) @@ -41,7 +41,7 @@ print(versions.agent_versions) While you can provide an `api_key` keyword argument, we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/) -to add `DIGITALOCEAN_GENAI_SDK_API_KEY="My API Key"` to your `.env` file +to add `DIGITALOCEAN_GRADIENTAI_API_KEY="My API Key"` to your `.env` file so that your API Key is not stored in source control. 
## Async usage @@ -55,7 +55,7 @@ from gradientai import AsyncGradientAI client = AsyncGradientAI( api_key=os.environ.get( - "DIGITALOCEAN_GENAI_SDK_API_KEY" + "DIGITALOCEAN_GRADIENTAI_API_KEY" ), # This is the default and can be omitted ) @@ -95,7 +95,7 @@ from gradientai import AsyncGradientAI async def main() -> None: async with AsyncGradientAI( api_key=os.environ.get( - "DIGITALOCEAN_GENAI_SDK_API_KEY" + "DIGITALOCEAN_GRADIENTAI_API_KEY" ), # This is the default and can be omitted http_client=DefaultAioHttpClient(), ) as client: diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 0a5eb9a1..f83fb8a7 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -82,13 +82,13 @@ def __init__( ) -> None: """Construct a new synchronous GradientAI client instance. - This automatically infers the `api_key` argument from the `DIGITALOCEAN_GENAI_SDK_API_KEY` environment variable if it is not provided. + This automatically infers the `api_key` argument from the `DIGITALOCEAN_GRADIENTAI_API_KEY` environment variable if it is not provided. """ if api_key is None: - api_key = os.environ.get("DIGITALOCEAN_GENAI_SDK_API_KEY") + api_key = os.environ.get("DIGITALOCEAN_GRADIENTAI_API_KEY") if api_key is None: raise GradientAIError( - "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GENAI_SDK_API_KEY environment variable" + "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GRADIENTAI_API_KEY environment variable" ) self.api_key = api_key @@ -301,13 +301,13 @@ def __init__( ) -> None: """Construct a new async AsyncGradientAI client instance. - This automatically infers the `api_key` argument from the `DIGITALOCEAN_GENAI_SDK_API_KEY` environment variable if it is not provided. + This automatically infers the `api_key` argument from the `DIGITALOCEAN_GRADIENTAI_API_KEY` environment variable if it is not provided. 
""" if api_key is None: - api_key = os.environ.get("DIGITALOCEAN_GENAI_SDK_API_KEY") + api_key = os.environ.get("DIGITALOCEAN_GRADIENTAI_API_KEY") if api_key is None: raise GradientAIError( - "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GENAI_SDK_API_KEY environment variable" + "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GRADIENTAI_API_KEY environment variable" ) self.api_key = api_key diff --git a/tests/test_client.py b/tests/test_client.py index d83082e3..f80be1ea 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -341,7 +341,7 @@ def test_validate_headers(self) -> None: assert request.headers.get("Authorization") == f"Bearer {api_key}" with pytest.raises(GradientAIError): - with update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}): + with update_env(**{"DIGITALOCEAN_GRADIENTAI_API_KEY": Omit()}): client2 = GradientAI(base_url=base_url, api_key=None, _strict_response_validation=True) _ = client2 @@ -1153,7 +1153,7 @@ def test_validate_headers(self) -> None: assert request.headers.get("Authorization") == f"Bearer {api_key}" with pytest.raises(GradientAIError): - with update_env(**{"DIGITALOCEAN_GENAI_SDK_API_KEY": Omit()}): + with update_env(**{"DIGITALOCEAN_GRADIENTAI_API_KEY": Omit()}): client2 = AsyncGradientAI(base_url=base_url, api_key=None, _strict_response_validation=True) _ = client2 From c33920aba0dc1f3b8f4f890ce706c86fd452dd6b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 21 Jun 2025 15:30:36 +0000 Subject: [PATCH 31/41] feat(api): update via SDK Studio --- .github/workflows/create-releases.yml | 38 +++++++++++++++++++ .github/workflows/publish-pypi.yml | 8 +--- .github/workflows/release-doctor.yml | 3 +- .stats.yml | 2 +- CONTRIBUTING.md | 4 +- README.md | 6 +-- bin/check-release-environment | 4 ++ pyproject.toml | 6 +-- 
src/gradientai/resources/agents/agents.py | 8 ++-- src/gradientai/resources/agents/api_keys.py | 8 ++-- .../resources/agents/child_agents.py | 8 ++-- src/gradientai/resources/agents/functions.py | 8 ++-- .../resources/agents/knowledge_bases.py | 8 ++-- src/gradientai/resources/agents/versions.py | 8 ++-- src/gradientai/resources/chat/chat.py | 8 ++-- src/gradientai/resources/chat/completions.py | 8 ++-- src/gradientai/resources/indexing_jobs.py | 8 ++-- .../resources/inference/api_keys.py | 8 ++-- .../resources/inference/inference.py | 8 ++-- src/gradientai/resources/inference/models.py | 8 ++-- .../resources/knowledge_bases/data_sources.py | 8 ++-- .../knowledge_bases/knowledge_bases.py | 8 ++-- src/gradientai/resources/models.py | 8 ++-- .../providers/anthropic/anthropic.py | 8 ++-- .../resources/providers/anthropic/keys.py | 8 ++-- .../resources/providers/openai/keys.py | 8 ++-- .../resources/providers/openai/openai.py | 8 ++-- .../resources/providers/providers.py | 8 ++-- src/gradientai/resources/regions.py | 8 ++-- 29 files changed, 139 insertions(+), 100 deletions(-) create mode 100644 .github/workflows/create-releases.yml diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml new file mode 100644 index 00000000..04dac49f --- /dev/null +++ b/.github/workflows/create-releases.yml @@ -0,0 +1,38 @@ +name: Create releases +on: + schedule: + - cron: '0 5 * * *' # every day at 5am UTC + push: + branches: + - main + +jobs: + release: + name: release + if: github.ref == 'refs/heads/main' && github.repository == 'digitalocean/gradientai-python' + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - uses: stainless-api/trigger-release-please@v1 + id: release + with: + repo: ${{ github.event.repository.full_name }} + stainless-api-key: ${{ secrets.STAINLESS_API_KEY }} + + - name: Install Rye + if: ${{ steps.release.outputs.releases_created }} + run: | + curl -sSf https://rye.astral.sh/get | bash + echo 
"$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' + + - name: Publish to PyPI + if: ${{ steps.release.outputs.releases_created }} + run: | + bash ./bin/publish-pypi + env: + PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index 34110cd4..bff3a970 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -1,13 +1,9 @@ -# This workflow is triggered when a GitHub release is created. -# It can also be run manually to re-publish to PyPI in case it failed for some reason. -# You can run this workflow by navigating to https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml +# workflow for re-running publishing to PyPI in case it fails for some reason +# you can run this workflow by navigating to https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml name: Publish PyPI on: workflow_dispatch: - release: - types: [published] - jobs: publish: name: publish diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index 9845ae8d..94e02117 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -9,7 +9,7 @@ jobs: release_doctor: name: release doctor runs-on: ubuntu-latest - if: github.repository == 'digitalocean/genai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') + if: github.repository == 'digitalocean/gradientai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') steps: - uses: actions/checkout@v4 @@ -18,4 +18,5 @@ jobs: run: | bash ./bin/check-release-environment env: + STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }} PYPI_TOKEN: 
${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/.stats.yml b/.stats.yml index 17f19856..9e73986b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 48e21c88c078b1d478257b2da0c840b2 +config_hash: bae6be3845572f2dadf83c0aad336142 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fe7e0d7c..086907ef 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -62,7 +62,7 @@ If you’d like to use the repository from source, you can either install from g To install via git: ```sh -$ pip install git+ssh://git@github.com/digitalocean/genai-python.git +$ pip install git+ssh://git@github.com/digitalocean/gradientai-python.git ``` Alternatively, you can build from source and install the wheel file: @@ -120,7 +120,7 @@ the changes aren't made through the automated pipeline, you may want to make rel ### Publish with a GitHub workflow -You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/genai-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. +You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml). This requires a setup organization or repository secret to be set up. ### Publish manually diff --git a/README.md b/README.md index 546252a1..3bba3198 100644 --- a/README.md +++ b/README.md @@ -271,9 +271,9 @@ version = response.parse() # get the object that `agents.versions.list()` would print(version.agent_versions) ``` -These methods return an [`APIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/gradientai/_response.py) object. 
+These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) object. -The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/genai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. +The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. #### `.with_streaming_response` @@ -379,7 +379,7 @@ This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) con We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. -We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/genai-python/issues) with questions, bugs, or suggestions. +We are keen for your feedback; please open an [issue](https://www.github.com/digitalocean/gradientai-python/issues) with questions, bugs, or suggestions. ### Determining the installed version diff --git a/bin/check-release-environment b/bin/check-release-environment index b1bd8969..78967e8b 100644 --- a/bin/check-release-environment +++ b/bin/check-release-environment @@ -2,6 +2,10 @@ errors=() +if [ -z "${STAINLESS_API_KEY}" ]; then + errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.") +fi + if [ -z "${PYPI_TOKEN}" ]; then errors+=("The GRADIENT_AI_PYPI_TOKEN secret has not been set. 
Please set it in either this repository's secrets or your organization secrets.") fi diff --git a/pyproject.toml b/pyproject.toml index 22cad738..1c89346a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,8 +34,8 @@ classifiers = [ ] [project.urls] -Homepage = "https://github.com/digitalocean/genai-python" -Repository = "https://github.com/digitalocean/genai-python" +Homepage = "https://github.com/digitalocean/gradientai-python" +Repository = "https://github.com/digitalocean/gradientai-python" [project.optional-dependencies] aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.6"] @@ -124,7 +124,7 @@ path = "README.md" [[tool.hatch.metadata.hooks.fancy-pypi-readme.substitutions]] # replace relative links with absolute links pattern = '\[(.+?)\]\(((?!https?://)\S+?)\)' -replacement = '[\1](https://github.com/digitalocean/genai-python/tree/main/\g<2>)' +replacement = '[\1](https://github.com/digitalocean/gradientai-python/tree/main/\g<2>)' [tool.pytest.ini_options] testpaths = ["tests"] diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index 78439d33..63f0c4d4 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -104,7 +104,7 @@ def with_raw_response(self) -> AgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AgentsResourceWithRawResponse(self) @@ -113,7 +113,7 @@ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AgentsResourceWithStreamingResponse(self) @@ -472,7 +472,7 @@ def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncAgentsResourceWithRawResponse(self) @@ -481,7 +481,7 @@ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncAgentsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py index 155e3adc..1cf2278e 100644 --- a/src/gradientai/resources/agents/api_keys.py +++ b/src/gradientai/resources/agents/api_keys.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> APIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return APIKeysResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return APIKeysResourceWithStreamingResponse(self) @@ -278,7 +278,7 @@ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncAPIKeysResourceWithRawResponse(self) @@ -287,7 +287,7 @@ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncAPIKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/child_agents.py b/src/gradientai/resources/agents/child_agents.py index 9031d8ce..ad30f106 100644 --- a/src/gradientai/resources/agents/child_agents.py +++ b/src/gradientai/resources/agents/child_agents.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> ChildAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return ChildAgentsResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> ChildAgentsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return ChildAgentsResourceWithStreamingResponse(self) @@ -245,7 +245,7 @@ def with_raw_response(self) -> AsyncChildAgentsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncChildAgentsResourceWithRawResponse(self) @@ -254,7 +254,7 @@ def with_streaming_response(self) -> AsyncChildAgentsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncChildAgentsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/functions.py b/src/gradientai/resources/agents/functions.py index 67a811cc..8c5f3f49 100644 --- a/src/gradientai/resources/agents/functions.py +++ b/src/gradientai/resources/agents/functions.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> FunctionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return FunctionsResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> FunctionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return FunctionsResourceWithStreamingResponse(self) @@ -205,7 +205,7 @@ def with_raw_response(self) -> AsyncFunctionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncFunctionsResourceWithRawResponse(self) @@ -214,7 +214,7 @@ def with_streaming_response(self) -> AsyncFunctionsResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncFunctionsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/knowledge_bases.py b/src/gradientai/resources/agents/knowledge_bases.py index 3b9b0cd2..a5486c34 100644 --- a/src/gradientai/resources/agents/knowledge_bases.py +++ b/src/gradientai/resources/agents/knowledge_bases.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return KnowledgeBasesResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return KnowledgeBasesResourceWithStreamingResponse(self) @@ -166,7 +166,7 @@ def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncKnowledgeBasesResourceWithRawResponse(self) @@ -175,7 +175,7 @@ def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingRes """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncKnowledgeBasesResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/agents/versions.py b/src/gradientai/resources/agents/versions.py index 86dbf99f..65a35472 100644 --- a/src/gradientai/resources/agents/versions.py +++ b/src/gradientai/resources/agents/versions.py @@ -29,7 +29,7 @@ def with_raw_response(self) -> VersionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return VersionsResourceWithRawResponse(self) @@ -38,7 +38,7 @@ def with_streaming_response(self) -> VersionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return VersionsResourceWithStreamingResponse(self) @@ -147,7 +147,7 @@ def with_raw_response(self) -> AsyncVersionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncVersionsResourceWithRawResponse(self) @@ -156,7 +156,7 @@ def with_streaming_response(self) -> AsyncVersionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncVersionsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/chat/chat.py b/src/gradientai/resources/chat/chat.py index ac19d849..6fa2925d 100644 --- a/src/gradientai/resources/chat/chat.py +++ b/src/gradientai/resources/chat/chat.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> ChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return ChatResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> ChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return ChatResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncChatResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncChatResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncChatResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncChatResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/chat/completions.py b/src/gradientai/resources/chat/completions.py index 62ab8f0d..2d7c94c3 100644 --- a/src/gradientai/resources/chat/completions.py +++ b/src/gradientai/resources/chat/completions.py @@ -30,7 +30,7 @@ def with_raw_response(self) -> CompletionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return CompletionsResourceWithRawResponse(self) @@ -39,7 +39,7 @@ def with_streaming_response(self) -> CompletionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return CompletionsResourceWithStreamingResponse(self) @@ -193,7 +193,7 @@ def with_raw_response(self) -> AsyncCompletionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncCompletionsResourceWithRawResponse(self) @@ -202,7 +202,7 @@ def with_streaming_response(self) -> AsyncCompletionsResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncCompletionsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/indexing_jobs.py b/src/gradientai/resources/indexing_jobs.py index fcbcf43d..71c59023 100644 --- a/src/gradientai/resources/indexing_jobs.py +++ b/src/gradientai/resources/indexing_jobs.py @@ -34,7 +34,7 @@ def with_raw_response(self) -> IndexingJobsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return IndexingJobsResourceWithRawResponse(self) @@ -43,7 +43,7 @@ def with_streaming_response(self) -> IndexingJobsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return IndexingJobsResourceWithStreamingResponse(self) @@ -260,7 +260,7 @@ def with_raw_response(self) -> AsyncIndexingJobsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncIndexingJobsResourceWithRawResponse(self) @@ -269,7 +269,7 @@ def with_streaming_response(self) -> AsyncIndexingJobsResourceWithStreamingRespo """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncIndexingJobsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/inference/api_keys.py b/src/gradientai/resources/inference/api_keys.py index c00212f8..6759d09c 100644 --- a/src/gradientai/resources/inference/api_keys.py +++ b/src/gradientai/resources/inference/api_keys.py @@ -32,7 +32,7 @@ def with_raw_response(self) -> APIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return APIKeysResourceWithRawResponse(self) @@ -41,7 +41,7 @@ def with_streaming_response(self) -> APIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return APIKeysResourceWithStreamingResponse(self) @@ -252,7 +252,7 @@ def with_raw_response(self) -> AsyncAPIKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncAPIKeysResourceWithRawResponse(self) @@ -261,7 +261,7 @@ def with_streaming_response(self) -> AsyncAPIKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncAPIKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/inference/inference.py b/src/gradientai/resources/inference/inference.py index 325353dc..209d6f17 100644 --- a/src/gradientai/resources/inference/inference.py +++ b/src/gradientai/resources/inference/inference.py @@ -39,7 +39,7 @@ def with_raw_response(self) -> InferenceResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return InferenceResourceWithRawResponse(self) @@ -48,7 +48,7 @@ def with_streaming_response(self) -> InferenceResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return InferenceResourceWithStreamingResponse(self) @@ -68,7 +68,7 @@ def with_raw_response(self) -> AsyncInferenceResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncInferenceResourceWithRawResponse(self) @@ -77,7 +77,7 @@ def with_streaming_response(self) -> AsyncInferenceResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncInferenceResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/inference/models.py b/src/gradientai/resources/inference/models.py index da327695..42e1dcb2 100644 --- a/src/gradientai/resources/inference/models.py +++ b/src/gradientai/resources/inference/models.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> ModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return ModelsResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return ModelsResourceWithStreamingResponse(self) @@ -106,7 +106,7 @@ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncModelsResourceWithRawResponse(self) @@ -115,7 +115,7 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncModelsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/gradientai/resources/knowledge_bases/data_sources.py index b549b3dc..bcd48b74 100644 --- a/src/gradientai/resources/knowledge_bases/data_sources.py +++ b/src/gradientai/resources/knowledge_bases/data_sources.py @@ -35,7 +35,7 @@ def with_raw_response(self) -> DataSourcesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return DataSourcesResourceWithRawResponse(self) @@ -44,7 +44,7 @@ def with_streaming_response(self) -> DataSourcesResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return DataSourcesResourceWithStreamingResponse(self) @@ -202,7 +202,7 @@ def with_raw_response(self) -> AsyncDataSourcesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncDataSourcesResourceWithRawResponse(self) @@ -211,7 +211,7 @@ def with_streaming_response(self) -> AsyncDataSourcesResourceWithStreamingRespon """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncDataSourcesResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py index cf0cd8d8..2cab4f7b 100644 --- a/src/gradientai/resources/knowledge_bases/knowledge_bases.py +++ b/src/gradientai/resources/knowledge_bases/knowledge_bases.py @@ -46,7 +46,7 @@ def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return KnowledgeBasesResourceWithRawResponse(self) @@ -55,7 +55,7 @@ def with_streaming_response(self) -> KnowledgeBasesResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return KnowledgeBasesResourceWithStreamingResponse(self) @@ -322,7 +322,7 @@ def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncKnowledgeBasesResourceWithRawResponse(self) @@ -331,7 +331,7 @@ def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingRes """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncKnowledgeBasesResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/models.py b/src/gradientai/resources/models.py index 2c7b40ab..c8e78b9b 100644 --- a/src/gradientai/resources/models.py +++ b/src/gradientai/resources/models.py @@ -31,7 +31,7 @@ def with_raw_response(self) -> ModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return ModelsResourceWithRawResponse(self) @@ -40,7 +40,7 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return ModelsResourceWithStreamingResponse(self) @@ -128,7 +128,7 @@ def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncModelsResourceWithRawResponse(self) @@ -137,7 +137,7 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncModelsResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/anthropic/anthropic.py b/src/gradientai/resources/providers/anthropic/anthropic.py index 64783563..23a914e9 100644 --- a/src/gradientai/resources/providers/anthropic/anthropic.py +++ b/src/gradientai/resources/providers/anthropic/anthropic.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> AnthropicResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AnthropicResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AnthropicResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncAnthropicResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncAnthropicResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/anthropic/keys.py b/src/gradientai/resources/providers/anthropic/keys.py index 9c1f6391..d1a33290 100644 --- a/src/gradientai/resources/providers/anthropic/keys.py +++ b/src/gradientai/resources/providers/anthropic/keys.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> KeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return KeysResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> KeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return KeysResourceWithStreamingResponse(self) @@ -315,7 +315,7 @@ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncKeysResourceWithRawResponse(self) @@ -324,7 +324,7 @@ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/openai/keys.py b/src/gradientai/resources/providers/openai/keys.py index 9bfaba8e..01cfee75 100644 --- a/src/gradientai/resources/providers/openai/keys.py +++ b/src/gradientai/resources/providers/openai/keys.py @@ -33,7 +33,7 @@ def with_raw_response(self) -> KeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return KeysResourceWithRawResponse(self) @@ -42,7 +42,7 @@ def with_streaming_response(self) -> KeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return KeysResourceWithStreamingResponse(self) @@ -313,7 +313,7 @@ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncKeysResourceWithRawResponse(self) @@ -322,7 +322,7 @@ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncKeysResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/openai/openai.py b/src/gradientai/resources/providers/openai/openai.py index d29fd062..b02dc2e1 100644 --- a/src/gradientai/resources/providers/openai/openai.py +++ b/src/gradientai/resources/providers/openai/openai.py @@ -27,7 +27,7 @@ def with_raw_response(self) -> OpenAIResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return OpenAIResourceWithRawResponse(self) @@ -36,7 +36,7 @@ def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return OpenAIResourceWithStreamingResponse(self) @@ -52,7 +52,7 @@ def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncOpenAIResourceWithRawResponse(self) @@ -61,7 +61,7 @@ def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncOpenAIResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/providers/providers.py b/src/gradientai/resources/providers/providers.py index 50e3db1a..ef942f73 100644 --- a/src/gradientai/resources/providers/providers.py +++ b/src/gradientai/resources/providers/providers.py @@ -39,7 +39,7 @@ def with_raw_response(self) -> ProvidersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return ProvidersResourceWithRawResponse(self) @@ -48,7 +48,7 @@ def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return ProvidersResourceWithStreamingResponse(self) @@ -68,7 +68,7 @@ def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncProvidersResourceWithRawResponse(self) @@ -77,7 +77,7 @@ def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncProvidersResourceWithStreamingResponse(self) diff --git a/src/gradientai/resources/regions.py b/src/gradientai/resources/regions.py index 43c2038b..4c50d9e6 100644 --- a/src/gradientai/resources/regions.py +++ b/src/gradientai/resources/regions.py @@ -28,7 +28,7 @@ def with_raw_response(self) -> RegionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. - For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return RegionsResourceWithRawResponse(self) @@ -37,7 +37,7 @@ def with_streaming_response(self) -> RegionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return RegionsResourceWithStreamingResponse(self) @@ -97,7 +97,7 @@ def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse: This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
- For more information, see https://www.github.com/digitalocean/genai-python#accessing-raw-response-data-eg-headers + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers """ return AsyncRegionsResourceWithRawResponse(self) @@ -106,7 +106,7 @@ def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse: """ An alternative to `.with_raw_response` that doesn't eagerly read the response body. - For more information, see https://www.github.com/digitalocean/genai-python#with_streaming_response + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response """ return AsyncRegionsResourceWithStreamingResponse(self) From 34382c06c5d61ac97572cb4977d020e1ede9d4ff Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 21 Jun 2025 15:31:27 +0000 Subject: [PATCH 32/41] feat(api): update via SDK Studio --- .stats.yml | 2 +- README.md | 14 ++++---------- src/gradientai/_client.py | 12 ++++++------ tests/test_client.py | 4 ++-- 4 files changed, 13 insertions(+), 19 deletions(-) diff --git a/.stats.yml b/.stats.yml index 9e73986b..34b3d279 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: bae6be3845572f2dadf83c0aad336142 +config_hash: a17cf79d9650def96874dbd8e2416faf diff --git a/README.md b/README.md index 3bba3198..bd72811f 100644 --- a/README.md +++ b/README.md @@ -28,9 +28,7 @@ import os from gradientai import GradientAI client = GradientAI( - api_key=os.environ.get( - "DIGITALOCEAN_GRADIENTAI_API_KEY" - ), # This is the default and can be omitted + api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can 
be omitted ) versions = client.agents.versions.list( @@ -41,7 +39,7 @@ print(versions.agent_versions) While you can provide an `api_key` keyword argument, we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/) -to add `DIGITALOCEAN_GRADIENTAI_API_KEY="My API Key"` to your `.env` file +to add `GRADIENTAI_API_KEY="My API Key"` to your `.env` file so that your API Key is not stored in source control. ## Async usage @@ -54,9 +52,7 @@ import asyncio from gradientai import AsyncGradientAI client = AsyncGradientAI( - api_key=os.environ.get( - "DIGITALOCEAN_GRADIENTAI_API_KEY" - ), # This is the default and can be omitted + api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted ) @@ -94,9 +90,7 @@ from gradientai import AsyncGradientAI async def main() -> None: async with AsyncGradientAI( - api_key=os.environ.get( - "DIGITALOCEAN_GRADIENTAI_API_KEY" - ), # This is the default and can be omitted + api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted http_client=DefaultAioHttpClient(), ) as client: versions = await client.agents.versions.list( diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index f83fb8a7..8710fe68 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -82,13 +82,13 @@ def __init__( ) -> None: """Construct a new synchronous GradientAI client instance. - This automatically infers the `api_key` argument from the `DIGITALOCEAN_GRADIENTAI_API_KEY` environment variable if it is not provided. + This automatically infers the `api_key` argument from the `GRADIENTAI_API_KEY` environment variable if it is not provided. 
""" if api_key is None: - api_key = os.environ.get("DIGITALOCEAN_GRADIENTAI_API_KEY") + api_key = os.environ.get("GRADIENTAI_API_KEY") if api_key is None: raise GradientAIError( - "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GRADIENTAI_API_KEY environment variable" + "The api_key client option must be set either by passing api_key to the client or by setting the GRADIENTAI_API_KEY environment variable" ) self.api_key = api_key @@ -301,13 +301,13 @@ def __init__( ) -> None: """Construct a new async AsyncGradientAI client instance. - This automatically infers the `api_key` argument from the `DIGITALOCEAN_GRADIENTAI_API_KEY` environment variable if it is not provided. + This automatically infers the `api_key` argument from the `GRADIENTAI_API_KEY` environment variable if it is not provided. """ if api_key is None: - api_key = os.environ.get("DIGITALOCEAN_GRADIENTAI_API_KEY") + api_key = os.environ.get("GRADIENTAI_API_KEY") if api_key is None: raise GradientAIError( - "The api_key client option must be set either by passing api_key to the client or by setting the DIGITALOCEAN_GRADIENTAI_API_KEY environment variable" + "The api_key client option must be set either by passing api_key to the client or by setting the GRADIENTAI_API_KEY environment variable" ) self.api_key = api_key diff --git a/tests/test_client.py b/tests/test_client.py index f80be1ea..f19a5edb 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -341,7 +341,7 @@ def test_validate_headers(self) -> None: assert request.headers.get("Authorization") == f"Bearer {api_key}" with pytest.raises(GradientAIError): - with update_env(**{"DIGITALOCEAN_GRADIENTAI_API_KEY": Omit()}): + with update_env(**{"GRADIENTAI_API_KEY": Omit()}): client2 = GradientAI(base_url=base_url, api_key=None, _strict_response_validation=True) _ = client2 @@ -1153,7 +1153,7 @@ def test_validate_headers(self) -> None: assert request.headers.get("Authorization") == 
f"Bearer {api_key}" with pytest.raises(GradientAIError): - with update_env(**{"DIGITALOCEAN_GRADIENTAI_API_KEY": Omit()}): + with update_env(**{"GRADIENTAI_API_KEY": Omit()}): client2 = AsyncGradientAI(base_url=base_url, api_key=None, _strict_response_validation=True) _ = client2 From 41268721eafd33fcca5688ca5dff7401f25bdeb2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 24 Jun 2025 02:22:04 +0000 Subject: [PATCH 33/41] chore(internal): codegen related update --- .github/workflows/create-releases.yml | 38 --------------------------- .github/workflows/publish-pypi.yml | 8 ++++-- .github/workflows/release-doctor.yml | 1 - bin/check-release-environment | 4 --- 4 files changed, 6 insertions(+), 45 deletions(-) delete mode 100644 .github/workflows/create-releases.yml diff --git a/.github/workflows/create-releases.yml b/.github/workflows/create-releases.yml deleted file mode 100644 index 04dac49f..00000000 --- a/.github/workflows/create-releases.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: Create releases -on: - schedule: - - cron: '0 5 * * *' # every day at 5am UTC - push: - branches: - - main - -jobs: - release: - name: release - if: github.ref == 'refs/heads/main' && github.repository == 'digitalocean/gradientai-python' - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - - - uses: stainless-api/trigger-release-please@v1 - id: release - with: - repo: ${{ github.event.repository.full_name }} - stainless-api-key: ${{ secrets.STAINLESS_API_KEY }} - - - name: Install Rye - if: ${{ steps.release.outputs.releases_created }} - run: | - curl -sSf https://rye.astral.sh/get | bash - echo "$HOME/.rye/shims" >> $GITHUB_PATH - env: - RYE_VERSION: '0.44.0' - RYE_INSTALL_OPTION: '--yes' - - - name: Publish to PyPI - if: ${{ steps.release.outputs.releases_created }} - run: | - bash ./bin/publish-pypi - env: - PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git 
a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml index bff3a970..3dcd6c42 100644 --- a/.github/workflows/publish-pypi.yml +++ b/.github/workflows/publish-pypi.yml @@ -1,9 +1,13 @@ -# workflow for re-running publishing to PyPI in case it fails for some reason -# you can run this workflow by navigating to https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml +# This workflow is triggered when a GitHub release is created. +# It can also be run manually to re-publish to PyPI in case it failed for some reason. +# You can run this workflow by navigating to https://www.github.com/digitalocean/gradientai-python/actions/workflows/publish-pypi.yml name: Publish PyPI on: workflow_dispatch: + release: + types: [published] + jobs: publish: name: publish diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml index 94e02117..d49e26c2 100644 --- a/.github/workflows/release-doctor.yml +++ b/.github/workflows/release-doctor.yml @@ -18,5 +18,4 @@ jobs: run: | bash ./bin/check-release-environment env: - STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }} PYPI_TOKEN: ${{ secrets.GRADIENT_AI_PYPI_TOKEN || secrets.PYPI_TOKEN }} diff --git a/bin/check-release-environment b/bin/check-release-environment index 78967e8b..b1bd8969 100644 --- a/bin/check-release-environment +++ b/bin/check-release-environment @@ -2,10 +2,6 @@ errors=() -if [ -z "${STAINLESS_API_KEY}" ]; then - errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.") -fi - if [ -z "${PYPI_TOKEN}" ]; then errors+=("The GRADIENT_AI_PYPI_TOKEN secret has not been set. 
Please set it in either this repository's secrets or your organization secrets.") fi From 75b45398c18e75be3389be20479f54521c2e474a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 24 Jun 2025 04:40:04 +0000 Subject: [PATCH 34/41] chore(tests): skip some failing tests on the latest python versions --- tests/test_client.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/test_client.py b/tests/test_client.py index f19a5edb..11ebd21b 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -191,6 +191,7 @@ def test_copy_signature(self) -> None: copy_param = copy_signature.parameters.get(name) assert copy_param is not None, f"copy() signature is missing the {name} param" + @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") def test_copy_build_request(self) -> None: options = FinalRequestOptions(method="get", url="/foo") @@ -1003,6 +1004,7 @@ def test_copy_signature(self) -> None: copy_param = copy_signature.parameters.get(name) assert copy_param is not None, f"copy() signature is missing the {name} param" + @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") def test_copy_build_request(self) -> None: options = FinalRequestOptions(method="get", url="/foo") From b16cceb63edb4253084036b693834bde5da10943 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 01:13:24 +0000 Subject: [PATCH 35/41] feat(api): update via SDK Studio --- .stats.yml | 2 +- tests/test_client.py | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/.stats.yml b/.stats.yml index 34b3d279..742d7130 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: a17cf79d9650def96874dbd8e2416faf +config_hash: 6082607b38b030ffbcb6f681788d1a88 diff --git a/tests/test_client.py b/tests/test_client.py index 11ebd21b..f19a5edb 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -191,7 +191,6 @@ def test_copy_signature(self) -> None: copy_param = copy_signature.parameters.get(name) assert copy_param is not None, f"copy() signature is missing the {name} param" - @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") def test_copy_build_request(self) -> None: options = FinalRequestOptions(method="get", url="/foo") @@ -1004,7 +1003,6 @@ def test_copy_signature(self) -> None: copy_param = copy_signature.parameters.get(name) assert copy_param is not None, f"copy() signature is missing the {name} param" - @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") def test_copy_build_request(self) -> None: options = FinalRequestOptions(method="get", url="/foo") From 4212f62b19c44bcb12c02fe396e8c51dd89d3868 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 01:15:44 +0000 Subject: [PATCH 36/41] feat(api): update via SDK Studio --- .stats.yml | 2 +- api.md | 33 +++- src/gradientai/_client.py | 2 +- .../resources/knowledge_bases/data_sources.py | 5 +- src/gradientai/resources/regions/__init__.py | 19 +++ .../resources/{ => regions}/regions.py | 16 +- src/gradientai/types/__init__.py | 5 + src/gradientai/types/agent_list_response.py | 139 +---------------- src/gradientai/types/api_agent.py | 144 +----------------- src/gradientai/types/api_agent_model.py | 57 +++++++ src/gradientai/types/api_agreement.py | 17 +++ 
src/gradientai/types/api_evaluation_metric.py | 24 +++ src/gradientai/types/api_model.py | 26 +--- src/gradientai/types/api_model_version.py | 15 ++ .../types/api_openai_api_key_info.py | 70 +-------- src/gradientai/types/api_workspace.py | 36 +++++ src/gradientai/types/chat/__init__.py | 1 + .../chat/chat_completion_token_logprob.py | 57 +++++++ .../types/chat/completion_create_response.py | 117 +------------- .../types/knowledge_base_create_params.py | 17 +-- .../types/knowledge_bases/__init__.py | 1 + .../knowledge_bases/aws_data_source_param.py | 19 +++ .../data_source_create_params.py | 17 +-- src/gradientai/types/regions/__init__.py | 6 + .../types/regions/api_evaluation_test_case.py | 46 ++++++ .../types/regions/api_star_metric.py | 19 +++ .../types/regions/evaluation_runs/__init__.py | 3 + tests/api_resources/regions/__init__.py | 1 + 28 files changed, 396 insertions(+), 518 deletions(-) create mode 100644 src/gradientai/resources/regions/__init__.py rename src/gradientai/resources/{ => regions}/regions.py (94%) create mode 100644 src/gradientai/types/api_agent_model.py create mode 100644 src/gradientai/types/api_agreement.py create mode 100644 src/gradientai/types/api_evaluation_metric.py create mode 100644 src/gradientai/types/api_model_version.py create mode 100644 src/gradientai/types/api_workspace.py create mode 100644 src/gradientai/types/chat/chat_completion_token_logprob.py create mode 100644 src/gradientai/types/knowledge_bases/aws_data_source_param.py create mode 100644 src/gradientai/types/regions/__init__.py create mode 100644 src/gradientai/types/regions/api_evaluation_test_case.py create mode 100644 src/gradientai/types/regions/api_star_metric.py create mode 100644 src/gradientai/types/regions/evaluation_runs/__init__.py create mode 100644 tests/api_resources/regions/__init__.py diff --git a/.stats.yml b/.stats.yml index 742d7130..611b679c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 58 openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 6082607b38b030ffbcb6f681788d1a88 +config_hash: ae932f39d93e617d3f513271503efbcf diff --git a/api.md b/api.md index 2376a11f..d644b609 100644 --- a/api.md +++ b/api.md @@ -6,10 +6,12 @@ Types: from gradientai.types import ( APIAgent, APIAgentAPIKeyInfo, + APIAgentModel, APIAnthropicAPIKeyInfo, APIDeploymentVisibility, APIOpenAIAPIKeyInfo, APIRetrievalMethod, + APIWorkspace, AgentCreateResponse, AgentRetrieveResponse, AgentUpdateResponse, @@ -174,12 +176,34 @@ Methods: Types: ```python -from gradientai.types import RegionListResponse +from gradientai.types import APIEvaluationMetric, RegionListResponse ``` Methods: -- client.regions.list(\*\*params) -> RegionListResponse +- client.regions.list(\*\*params) -> RegionListResponse + +## EvaluationRuns + +### Results + +Types: + +```python +from gradientai.types.regions.evaluation_runs import ( + APIEvaluationMetricResult, + APIEvaluationRun, + APIPrompt, +) +``` + +## EvaluationTestCases + +Types: + +```python +from gradientai.types.regions import APIEvaluationTestCase, APIStarMetric +``` # IndexingJobs @@ -237,6 +261,7 @@ from gradientai.types.knowledge_bases import ( APIKnowledgeBaseDataSource, APISpacesDataSource, APIWebCrawlerDataSource, + AwsDataSource, DataSourceCreateResponse, DataSourceListResponse, DataSourceDeleteResponse, @@ -256,7 +281,7 @@ Methods: Types: ```python -from gradientai.types.chat import CompletionCreateResponse +from gradientai.types.chat import ChatCompletionTokenLogprob, CompletionCreateResponse ``` Methods: @@ -306,7 +331,7 @@ Methods: Types: ```python -from gradientai.types import APIModel, ModelListResponse +from gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelListResponse ``` Methods: diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py 
index 8710fe68..71db35bc 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -33,10 +33,10 @@ if TYPE_CHECKING: from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource - from .resources.regions import RegionsResource, AsyncRegionsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource + from .resources.regions.regions import RegionsResource, AsyncRegionsResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource from .resources.providers.providers import ProvidersResource, AsyncProvidersResource from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/gradientai/resources/knowledge_bases/data_sources.py index bcd48b74..e05696b9 100644 --- a/src/gradientai/resources/knowledge_bases/data_sources.py +++ b/src/gradientai/resources/knowledge_bases/data_sources.py @@ -19,6 +19,7 @@ data_source_list_params, data_source_create_params, ) +from ...types.knowledge_bases.aws_data_source_param import AwsDataSourceParam from ...types.knowledge_bases.data_source_list_response import DataSourceListResponse from ...types.knowledge_bases.data_source_create_response import DataSourceCreateResponse from ...types.knowledge_bases.data_source_delete_response import DataSourceDeleteResponse @@ -52,7 +53,7 @@ def create( self, path_knowledge_base_uuid: str, *, - aws_data_source: data_source_create_params.AwsDataSource | NotGiven = NOT_GIVEN, + aws_data_source: AwsDataSourceParam | NotGiven = NOT_GIVEN, body_knowledge_base_uuid: str | NotGiven = NOT_GIVEN, spaces_data_source: APISpacesDataSourceParam | NotGiven = NOT_GIVEN, 
web_crawler_data_source: APIWebCrawlerDataSourceParam | NotGiven = NOT_GIVEN, @@ -219,7 +220,7 @@ async def create( self, path_knowledge_base_uuid: str, *, - aws_data_source: data_source_create_params.AwsDataSource | NotGiven = NOT_GIVEN, + aws_data_source: AwsDataSourceParam | NotGiven = NOT_GIVEN, body_knowledge_base_uuid: str | NotGiven = NOT_GIVEN, spaces_data_source: APISpacesDataSourceParam | NotGiven = NOT_GIVEN, web_crawler_data_source: APIWebCrawlerDataSourceParam | NotGiven = NOT_GIVEN, diff --git a/src/gradientai/resources/regions/__init__.py b/src/gradientai/resources/regions/__init__.py new file mode 100644 index 00000000..fb9cf834 --- /dev/null +++ b/src/gradientai/resources/regions/__init__.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .regions import ( + RegionsResource, + AsyncRegionsResource, + RegionsResourceWithRawResponse, + AsyncRegionsResourceWithRawResponse, + RegionsResourceWithStreamingResponse, + AsyncRegionsResourceWithStreamingResponse, +) + +__all__ = [ + "RegionsResource", + "AsyncRegionsResource", + "RegionsResourceWithRawResponse", + "AsyncRegionsResourceWithRawResponse", + "RegionsResourceWithStreamingResponse", + "AsyncRegionsResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/regions.py b/src/gradientai/resources/regions/regions.py similarity index 94% rename from src/gradientai/resources/regions.py rename to src/gradientai/resources/regions/regions.py index 4c50d9e6..6662e80a 100644 --- a/src/gradientai/resources/regions.py +++ b/src/gradientai/resources/regions/regions.py @@ -4,19 +4,19 @@ import httpx -from ..types import region_list_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( +from ...types import region_list_params +from 
..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from .._base_client import make_request_options -from ..types.region_list_response import RegionListResponse +from ..._base_client import make_request_options +from ...types.region_list_response import RegionListResponse __all__ = ["RegionsResource", "AsyncRegionsResource"] diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index 5ee961c6..22414733 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -4,8 +4,12 @@ from .api_agent import APIAgent as APIAgent from .api_model import APIModel as APIModel +from .api_agreement import APIAgreement as APIAgreement +from .api_workspace import APIWorkspace as APIWorkspace +from .api_agent_model import APIAgentModel as APIAgentModel from .api_indexing_job import APIIndexingJob as APIIndexingJob from .agent_list_params import AgentListParams as AgentListParams +from .api_model_version import APIModelVersion as APIModelVersion from .model_list_params import ModelListParams as ModelListParams from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase from .region_list_params import RegionListParams as RegionListParams @@ -18,6 +22,7 @@ from .agent_create_response import AgentCreateResponse as AgentCreateResponse from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse +from .api_evaluation_metric import APIEvaluationMetric as APIEvaluationMetric from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo from .agent_retrieve_response import AgentRetrieveResponse as 
AgentRetrieveResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo diff --git a/src/gradientai/types/agent_list_response.py b/src/gradientai/types/agent_list_response.py index 6af9cd51..97c0f0d5 100644 --- a/src/gradientai/types/agent_list_response.py +++ b/src/gradientai/types/agent_list_response.py @@ -6,6 +6,7 @@ from .._models import BaseModel from .agents.api_meta import APIMeta +from .api_agent_model import APIAgentModel from .agents.api_links import APILinks from .api_knowledge_base import APIKnowledgeBase from .api_retrieval_method import APIRetrievalMethod @@ -17,14 +18,8 @@ "AgentChatbot", "AgentChatbotIdentifier", "AgentDeployment", - "AgentModel", - "AgentModelAgreement", - "AgentModelVersion", "AgentTemplate", "AgentTemplateGuardrail", - "AgentTemplateModel", - "AgentTemplateModelAgreement", - "AgentTemplateModelVersion", ] @@ -74,140 +69,12 @@ class AgentDeployment(BaseModel): visibility: Optional[APIDeploymentVisibility] = None -class AgentModelAgreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - -class AgentModelVersion(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None - - -class AgentModel(BaseModel): - agreement: Optional[AgentModelAgreement] = None - - created_at: Optional[datetime] = None - - inference_name: Optional[str] = None - - inference_version: Optional[str] = None - - is_foundational: Optional[bool] = None - - metadata: Optional[object] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( - None - ) - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - usecases: Optional[ - List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - 
"MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - ] = None - - uuid: Optional[str] = None - - version: Optional[AgentModelVersion] = None - - class AgentTemplateGuardrail(BaseModel): priority: Optional[int] = None uuid: Optional[str] = None -class AgentTemplateModelAgreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - -class AgentTemplateModelVersion(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None - - -class AgentTemplateModel(BaseModel): - agreement: Optional[AgentTemplateModelAgreement] = None - - created_at: Optional[datetime] = None - - inference_name: Optional[str] = None - - inference_version: Optional[str] = None - - is_foundational: Optional[bool] = None - - metadata: Optional[object] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( - None - ) - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - usecases: Optional[ - List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - ] = None - - uuid: Optional[str] = None - - version: Optional[AgentTemplateModelVersion] = None - - class AgentTemplate(BaseModel): created_at: Optional[datetime] = None @@ -225,7 +92,7 @@ class AgentTemplate(BaseModel): max_tokens: Optional[int] = None - model: Optional[AgentTemplateModel] = None + model: Optional[APIAgentModel] = None name: Optional[str] = None @@ -276,7 +143,7 @@ class Agent(BaseModel): response. 
""" - model: Optional[AgentModel] = None + model: Optional[APIAgentModel] = None name: Optional[str] = None diff --git a/src/gradientai/types/api_agent.py b/src/gradientai/types/api_agent.py index 3eb01fc7..1378950a 100644 --- a/src/gradientai/types/api_agent.py +++ b/src/gradientai/types/api_agent.py @@ -7,6 +7,7 @@ from typing_extensions import Literal from .._models import BaseModel +from .api_agent_model import APIAgentModel from .api_knowledge_base import APIKnowledgeBase from .api_retrieval_method import APIRetrievalMethod from .api_agent_api_key_info import APIAgentAPIKeyInfo @@ -22,14 +23,8 @@ "Deployment", "Function", "Guardrail", - "Model", - "ModelAgreement", - "ModelVersion", "Template", "TemplateGuardrail", - "TemplateModel", - "TemplateModelAgreement", - "TemplateModelVersion", ] @@ -144,140 +139,12 @@ class Guardrail(BaseModel): uuid: Optional[str] = None -class ModelAgreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - -class ModelVersion(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None - - -class Model(BaseModel): - agreement: Optional[ModelAgreement] = None - - created_at: Optional[datetime] = None - - inference_name: Optional[str] = None - - inference_version: Optional[str] = None - - is_foundational: Optional[bool] = None - - metadata: Optional[object] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( - None - ) - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - usecases: Optional[ - List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - 
"MODEL_USECASE_SERVERLESS", - ] - ] - ] = None - - uuid: Optional[str] = None - - version: Optional[ModelVersion] = None - - class TemplateGuardrail(BaseModel): priority: Optional[int] = None uuid: Optional[str] = None -class TemplateModelAgreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - -class TemplateModelVersion(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None - - -class TemplateModel(BaseModel): - agreement: Optional[TemplateModelAgreement] = None - - created_at: Optional[datetime] = None - - inference_name: Optional[str] = None - - inference_version: Optional[str] = None - - is_foundational: Optional[bool] = None - - metadata: Optional[object] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( - None - ) - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - usecases: Optional[ - List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - ] = None - - uuid: Optional[str] = None - - version: Optional[TemplateModelVersion] = None - - class Template(BaseModel): created_at: Optional[datetime] = None @@ -295,7 +162,7 @@ class Template(BaseModel): max_tokens: Optional[int] = None - model: Optional[TemplateModel] = None + model: Optional[APIAgentModel] = None name: Optional[str] = None @@ -355,7 +222,7 @@ class APIAgent(BaseModel): max_tokens: Optional[int] = None - model: Optional[Model] = None + model: Optional[APIAgentModel] = None name: Optional[str] = None @@ -395,4 +262,7 @@ class APIAgent(BaseModel): uuid: Optional[str] = None - 
workspace: Optional[object] = None + workspace: Optional["APIWorkspace"] = None + + +from .api_workspace import APIWorkspace diff --git a/src/gradientai/types/api_agent_model.py b/src/gradientai/types/api_agent_model.py new file mode 100644 index 00000000..1025321b --- /dev/null +++ b/src/gradientai/types/api_agent_model.py @@ -0,0 +1,57 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel +from .api_agreement import APIAgreement +from .api_model_version import APIModelVersion + +__all__ = ["APIAgentModel"] + + +class APIAgentModel(BaseModel): + agreement: Optional[APIAgreement] = None + + created_at: Optional[datetime] = None + + inference_name: Optional[str] = None + + inference_version: Optional[str] = None + + is_foundational: Optional[bool] = None + + metadata: Optional[object] = None + + name: Optional[str] = None + + parent_uuid: Optional[str] = None + + provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( + None + ) + + updated_at: Optional[datetime] = None + + upload_complete: Optional[bool] = None + + url: Optional[str] = None + + usecases: Optional[ + List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + ] = None + + uuid: Optional[str] = None + + version: Optional[APIModelVersion] = None diff --git a/src/gradientai/types/api_agreement.py b/src/gradientai/types/api_agreement.py new file mode 100644 index 00000000..c4359f1f --- /dev/null +++ b/src/gradientai/types/api_agreement.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from .._models import BaseModel + +__all__ = ["APIAgreement"] + + +class APIAgreement(BaseModel): + description: Optional[str] = None + + name: Optional[str] = None + + url: Optional[str] = None + + uuid: Optional[str] = None diff --git a/src/gradientai/types/api_evaluation_metric.py b/src/gradientai/types/api_evaluation_metric.py new file mode 100644 index 00000000..05390297 --- /dev/null +++ b/src/gradientai/types/api_evaluation_metric.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["APIEvaluationMetric"] + + +class APIEvaluationMetric(BaseModel): + description: Optional[str] = None + + metric_name: Optional[str] = None + + metric_type: Optional[ + Literal["METRIC_TYPE_UNSPECIFIED", "METRIC_TYPE_GENERAL_QUALITY", "METRIC_TYPE_RAG_AND_TOOL"] + ] = None + + metric_uuid: Optional[str] = None + + metric_value_type: Optional[ + Literal["METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", "METRIC_VALUE_TYPE_STRING"] + ] = None diff --git a/src/gradientai/types/api_model.py b/src/gradientai/types/api_model.py index ac6f9c55..c2bc1edd 100644 --- a/src/gradientai/types/api_model.py +++ b/src/gradientai/types/api_model.py @@ -4,30 +4,14 @@ from datetime import datetime from .._models import BaseModel +from .api_agreement import APIAgreement +from .api_model_version import APIModelVersion -__all__ = ["APIModel", "Agreement", "Version"] - - -class Agreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - -class Version(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None +__all__ = ["APIModel"] class APIModel(BaseModel): - agreement: Optional[Agreement] = None + agreement: Optional[APIAgreement] = None 
created_at: Optional[datetime] = None @@ -45,4 +29,4 @@ class APIModel(BaseModel): uuid: Optional[str] = None - version: Optional[Version] = None + version: Optional[APIModelVersion] = None diff --git a/src/gradientai/types/api_model_version.py b/src/gradientai/types/api_model_version.py new file mode 100644 index 00000000..2e118632 --- /dev/null +++ b/src/gradientai/types/api_model_version.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from .._models import BaseModel + +__all__ = ["APIModelVersion"] + + +class APIModelVersion(BaseModel): + major: Optional[int] = None + + minor: Optional[int] = None + + patch: Optional[int] = None diff --git a/src/gradientai/types/api_openai_api_key_info.py b/src/gradientai/types/api_openai_api_key_info.py index 0f57136d..7467cfc2 100644 --- a/src/gradientai/types/api_openai_api_key_info.py +++ b/src/gradientai/types/api_openai_api_key_info.py @@ -2,75 +2,11 @@ from typing import List, Optional from datetime import datetime -from typing_extensions import Literal from .._models import BaseModel +from .api_agent_model import APIAgentModel -__all__ = ["APIOpenAIAPIKeyInfo", "Model", "ModelAgreement", "ModelVersion"] - - -class ModelAgreement(BaseModel): - description: Optional[str] = None - - name: Optional[str] = None - - url: Optional[str] = None - - uuid: Optional[str] = None - - -class ModelVersion(BaseModel): - major: Optional[int] = None - - minor: Optional[int] = None - - patch: Optional[int] = None - - -class Model(BaseModel): - agreement: Optional[ModelAgreement] = None - - created_at: Optional[datetime] = None - - inference_name: Optional[str] = None - - inference_version: Optional[str] = None - - is_foundational: Optional[bool] = None - - metadata: Optional[object] = None - - name: Optional[str] = None - - parent_uuid: Optional[str] = None - - provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", 
"MODEL_PROVIDER_OPENAI"]] = ( - None - ) - - updated_at: Optional[datetime] = None - - upload_complete: Optional[bool] = None - - url: Optional[str] = None - - usecases: Optional[ - List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - ] = None - - uuid: Optional[str] = None - - version: Optional[ModelVersion] = None +__all__ = ["APIOpenAIAPIKeyInfo"] class APIOpenAIAPIKeyInfo(BaseModel): @@ -80,7 +16,7 @@ class APIOpenAIAPIKeyInfo(BaseModel): deleted_at: Optional[datetime] = None - models: Optional[List[Model]] = None + models: Optional[List[APIAgentModel]] = None name: Optional[str] = None diff --git a/src/gradientai/types/api_workspace.py b/src/gradientai/types/api_workspace.py new file mode 100644 index 00000000..b170d504 --- /dev/null +++ b/src/gradientai/types/api_workspace.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Optional +from datetime import datetime + +from .._models import BaseModel +from .regions.api_evaluation_test_case import APIEvaluationTestCase + +__all__ = ["APIWorkspace"] + + +class APIWorkspace(BaseModel): + agents: Optional[List["APIAgent"]] = None + + created_at: Optional[datetime] = None + + created_by: Optional[str] = None + + created_by_email: Optional[str] = None + + deleted_at: Optional[datetime] = None + + description: Optional[str] = None + + evaluation_test_cases: Optional[List[APIEvaluationTestCase]] = None + + name: Optional[str] = None + + updated_at: Optional[datetime] = None + + uuid: Optional[str] = None + + +from .api_agent import APIAgent diff --git a/src/gradientai/types/chat/__init__.py b/src/gradientai/types/chat/__init__.py index 9384ac14..59553f68 100644 --- a/src/gradientai/types/chat/__init__.py +++ b/src/gradientai/types/chat/__init__.py @@ -4,3 +4,4 @@ from .completion_create_params import CompletionCreateParams as CompletionCreateParams from .completion_create_response import CompletionCreateResponse as CompletionCreateResponse +from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob diff --git a/src/gradientai/types/chat/chat_completion_token_logprob.py b/src/gradientai/types/chat/chat_completion_token_logprob.py new file mode 100644 index 00000000..c69e2589 --- /dev/null +++ b/src/gradientai/types/chat/chat_completion_token_logprob.py @@ -0,0 +1,57 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel + +__all__ = ["ChatCompletionTokenLogprob", "TopLogprob"] + + +class TopLogprob(BaseModel): + token: str + """The token.""" + + bytes: Optional[List[int]] = None + """A list of integers representing the UTF-8 bytes representation of the token. 
+ + Useful in instances where characters are represented by multiple tokens and + their byte representations must be combined to generate the correct text + representation. Can be `null` if there is no bytes representation for the token. + """ + + logprob: float + """The log probability of this token, if it is within the top 20 most likely + tokens. + + Otherwise, the value `-9999.0` is used to signify that the token is very + unlikely. + """ + + +class ChatCompletionTokenLogprob(BaseModel): + token: str + """The token.""" + + bytes: Optional[List[int]] = None + """A list of integers representing the UTF-8 bytes representation of the token. + + Useful in instances where characters are represented by multiple tokens and + their byte representations must be combined to generate the correct text + representation. Can be `null` if there is no bytes representation for the token. + """ + + logprob: float + """The log probability of this token, if it is within the top 20 most likely + tokens. + + Otherwise, the value `-9999.0` is used to signify that the token is very + unlikely. + """ + + top_logprobs: List[TopLogprob] + """List of the most likely tokens and their log probability, at this token + position. + + In rare cases, there may be fewer than the number of requested `top_logprobs` + returned. 
+ """ diff --git a/src/gradientai/types/chat/completion_create_response.py b/src/gradientai/types/chat/completion_create_response.py index 5a25ac7c..1ac59a28 100644 --- a/src/gradientai/types/chat/completion_create_response.py +++ b/src/gradientai/types/chat/completion_create_response.py @@ -4,125 +4,16 @@ from typing_extensions import Literal from ..._models import BaseModel +from .chat_completion_token_logprob import ChatCompletionTokenLogprob -__all__ = [ - "CompletionCreateResponse", - "Choice", - "ChoiceLogprobs", - "ChoiceLogprobsContent", - "ChoiceLogprobsContentTopLogprob", - "ChoiceLogprobsRefusal", - "ChoiceLogprobsRefusalTopLogprob", - "ChoiceMessage", - "Usage", -] - - -class ChoiceLogprobsContentTopLogprob(BaseModel): - token: str - """The token.""" - - bytes: Optional[List[int]] = None - """A list of integers representing the UTF-8 bytes representation of the token. - - Useful in instances where characters are represented by multiple tokens and - their byte representations must be combined to generate the correct text - representation. Can be `null` if there is no bytes representation for the token. - """ - - logprob: float - """The log probability of this token, if it is within the top 20 most likely - tokens. - - Otherwise, the value `-9999.0` is used to signify that the token is very - unlikely. - """ - - -class ChoiceLogprobsContent(BaseModel): - token: str - """The token.""" - - bytes: Optional[List[int]] = None - """A list of integers representing the UTF-8 bytes representation of the token. - - Useful in instances where characters are represented by multiple tokens and - their byte representations must be combined to generate the correct text - representation. Can be `null` if there is no bytes representation for the token. - """ - - logprob: float - """The log probability of this token, if it is within the top 20 most likely - tokens. - - Otherwise, the value `-9999.0` is used to signify that the token is very - unlikely. 
- """ - - top_logprobs: List[ChoiceLogprobsContentTopLogprob] - """List of the most likely tokens and their log probability, at this token - position. - - In rare cases, there may be fewer than the number of requested `top_logprobs` - returned. - """ - - -class ChoiceLogprobsRefusalTopLogprob(BaseModel): - token: str - """The token.""" - - bytes: Optional[List[int]] = None - """A list of integers representing the UTF-8 bytes representation of the token. - - Useful in instances where characters are represented by multiple tokens and - their byte representations must be combined to generate the correct text - representation. Can be `null` if there is no bytes representation for the token. - """ - - logprob: float - """The log probability of this token, if it is within the top 20 most likely - tokens. - - Otherwise, the value `-9999.0` is used to signify that the token is very - unlikely. - """ - - -class ChoiceLogprobsRefusal(BaseModel): - token: str - """The token.""" - - bytes: Optional[List[int]] = None - """A list of integers representing the UTF-8 bytes representation of the token. - - Useful in instances where characters are represented by multiple tokens and - their byte representations must be combined to generate the correct text - representation. Can be `null` if there is no bytes representation for the token. - """ - - logprob: float - """The log probability of this token, if it is within the top 20 most likely - tokens. - - Otherwise, the value `-9999.0` is used to signify that the token is very - unlikely. - """ - - top_logprobs: List[ChoiceLogprobsRefusalTopLogprob] - """List of the most likely tokens and their log probability, at this token - position. - - In rare cases, there may be fewer than the number of requested `top_logprobs` - returned. 
- """ +__all__ = ["CompletionCreateResponse", "Choice", "ChoiceLogprobs", "ChoiceMessage", "Usage"] class ChoiceLogprobs(BaseModel): - content: Optional[List[ChoiceLogprobsContent]] = None + content: Optional[List[ChatCompletionTokenLogprob]] = None """A list of message content tokens with log probability information.""" - refusal: Optional[List[ChoiceLogprobsRefusal]] = None + refusal: Optional[List[ChatCompletionTokenLogprob]] = None """A list of message refusal tokens with log probability information.""" diff --git a/src/gradientai/types/knowledge_base_create_params.py b/src/gradientai/types/knowledge_base_create_params.py index 2552bcf6..acf52e30 100644 --- a/src/gradientai/types/knowledge_base_create_params.py +++ b/src/gradientai/types/knowledge_base_create_params.py @@ -5,11 +5,12 @@ from typing import List, Iterable from typing_extensions import TypedDict +from .knowledge_bases.aws_data_source_param import AwsDataSourceParam from .knowledge_bases.api_spaces_data_source_param import APISpacesDataSourceParam from .knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam from .knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam -__all__ = ["KnowledgeBaseCreateParams", "Datasource", "DatasourceAwsDataSource"] +__all__ = ["KnowledgeBaseCreateParams", "Datasource"] class KnowledgeBaseCreateParams(TypedDict, total=False): @@ -49,20 +50,8 @@ class KnowledgeBaseCreateParams(TypedDict, total=False): vpc_uuid: str -class DatasourceAwsDataSource(TypedDict, total=False): - bucket_name: str - - item_path: str - - key_id: str - - region: str - - secret_key: str - - class Datasource(TypedDict, total=False): - aws_data_source: DatasourceAwsDataSource + aws_data_source: AwsDataSourceParam bucket_name: str diff --git a/src/gradientai/types/knowledge_bases/__init__.py b/src/gradientai/types/knowledge_bases/__init__.py index f5f31034..859c3618 100644 --- a/src/gradientai/types/knowledge_bases/__init__.py +++ 
b/src/gradientai/types/knowledge_bases/__init__.py @@ -2,6 +2,7 @@ from __future__ import annotations +from .aws_data_source_param import AwsDataSourceParam as AwsDataSourceParam from .api_spaces_data_source import APISpacesDataSource as APISpacesDataSource from .data_source_list_params import DataSourceListParams as DataSourceListParams from .data_source_create_params import DataSourceCreateParams as DataSourceCreateParams diff --git a/src/gradientai/types/knowledge_bases/aws_data_source_param.py b/src/gradientai/types/knowledge_bases/aws_data_source_param.py new file mode 100644 index 00000000..93d49228 --- /dev/null +++ b/src/gradientai/types/knowledge_bases/aws_data_source_param.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["AwsDataSourceParam"] + + +class AwsDataSourceParam(TypedDict, total=False): + bucket_name: str + + item_path: str + + key_id: str + + region: str + + secret_key: str diff --git a/src/gradientai/types/knowledge_bases/data_source_create_params.py b/src/gradientai/types/knowledge_bases/data_source_create_params.py index b1abafdf..22bd76e7 100644 --- a/src/gradientai/types/knowledge_bases/data_source_create_params.py +++ b/src/gradientai/types/knowledge_bases/data_source_create_params.py @@ -5,29 +5,18 @@ from typing_extensions import Annotated, TypedDict from ..._utils import PropertyInfo +from .aws_data_source_param import AwsDataSourceParam from .api_spaces_data_source_param import APISpacesDataSourceParam from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam -__all__ = ["DataSourceCreateParams", "AwsDataSource"] +__all__ = ["DataSourceCreateParams"] class DataSourceCreateParams(TypedDict, total=False): - aws_data_source: AwsDataSource + aws_data_source: AwsDataSourceParam body_knowledge_base_uuid: Annotated[str, PropertyInfo(alias="knowledge_base_uuid")] 
spaces_data_source: APISpacesDataSourceParam web_crawler_data_source: APIWebCrawlerDataSourceParam - - -class AwsDataSource(TypedDict, total=False): - bucket_name: str - - item_path: str - - key_id: str - - region: str - - secret_key: str diff --git a/src/gradientai/types/regions/__init__.py b/src/gradientai/types/regions/__init__.py new file mode 100644 index 00000000..83b21099 --- /dev/null +++ b/src/gradientai/types/regions/__init__.py @@ -0,0 +1,6 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .api_star_metric import APIStarMetric as APIStarMetric +from .api_evaluation_test_case import APIEvaluationTestCase as APIEvaluationTestCase diff --git a/src/gradientai/types/regions/api_evaluation_test_case.py b/src/gradientai/types/regions/api_evaluation_test_case.py new file mode 100644 index 00000000..d799b0e0 --- /dev/null +++ b/src/gradientai/types/regions/api_evaluation_test_case.py @@ -0,0 +1,46 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional +from datetime import datetime + +from ..._models import BaseModel +from .api_star_metric import APIStarMetric +from ..api_evaluation_metric import APIEvaluationMetric + +__all__ = ["APIEvaluationTestCase"] + + +class APIEvaluationTestCase(BaseModel): + archived_at: Optional[datetime] = None + + created_at: Optional[datetime] = None + + created_by_user_email: Optional[str] = None + + created_by_user_id: Optional[str] = None + + dataset_name: Optional[str] = None + + dataset_uuid: Optional[str] = None + + description: Optional[str] = None + + latest_version_number_of_runs: Optional[int] = None + + metrics: Optional[List[APIEvaluationMetric]] = None + + name: Optional[str] = None + + star_metric: Optional[APIStarMetric] = None + + test_case_uuid: Optional[str] = None + + total_runs: Optional[int] = None + + updated_at: Optional[datetime] = None + + updated_by_user_email: Optional[str] = None + + updated_by_user_id: Optional[str] = None + + version: Optional[int] = None diff --git a/src/gradientai/types/regions/api_star_metric.py b/src/gradientai/types/regions/api_star_metric.py new file mode 100644 index 00000000..c9ecc60a --- /dev/null +++ b/src/gradientai/types/regions/api_star_metric.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["APIStarMetric"] + + +class APIStarMetric(BaseModel): + metric_uuid: Optional[str] = None + + name: Optional[str] = None + + success_threshold_pct: Optional[int] = None + """ + The success threshold for the star metric. This is a percentage value between 0 + and 100. 
+ """ diff --git a/src/gradientai/types/regions/evaluation_runs/__init__.py b/src/gradientai/types/regions/evaluation_runs/__init__.py new file mode 100644 index 00000000..f8ee8b14 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_runs/__init__.py @@ -0,0 +1,3 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations diff --git a/tests/api_resources/regions/__init__.py b/tests/api_resources/regions/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/regions/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. From 7aee6e55a0574fc1b6ab73a1777c92e4f3a940ea Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 01:19:15 +0000 Subject: [PATCH 37/41] feat(api): update via SDK Studio --- .stats.yml | 4 +- README.md | 7 +- api.md | 59 +- src/gradientai/resources/regions/__init__.py | 42 ++ .../resources/regions/evaluation_datasets.py | 292 +++++++++ .../regions/evaluation_runs/__init__.py | 33 + .../evaluation_runs/evaluation_runs.py | 316 +++++++++ .../regions/evaluation_runs/results.py | 264 ++++++++ .../regions/evaluation_test_cases.py | 618 ++++++++++++++++++ src/gradientai/resources/regions/regions.py | 157 +++++ src/gradientai/types/__init__.py | 3 + ...region_list_evaluation_metrics_response.py | 12 + src/gradientai/types/regions/__init__.py | 26 + .../types/regions/api_star_metric_param.py | 19 + ...reate_file_upload_presigned_urls_params.py | 20 + ...ate_file_upload_presigned_urls_response.py | 30 + .../evaluation_dataset_create_params.py | 17 + .../evaluation_dataset_create_response.py | 12 + .../regions/evaluation_run_create_params.py | 17 + .../regions/evaluation_run_create_response.py | 11 + .../evaluation_run_retrieve_response.py | 12 + .../types/regions/evaluation_runs/__init__.py | 6 + 
.../api_evaluation_metric_result.py | 17 + .../evaluation_runs/api_evaluation_run.py | 56 ++ .../regions/evaluation_runs/api_prompt.py | 42 ++ .../result_retrieve_prompt_response.py | 12 + .../result_retrieve_response.py | 16 + .../evaluation_test_case_create_params.py | 29 + .../evaluation_test_case_create_response.py | 12 + ...n_test_case_list_evaluation_runs_params.py | 12 + ...test_case_list_evaluation_runs_response.py | 13 + .../evaluation_test_case_list_response.py | 12 + .../evaluation_test_case_retrieve_response.py | 12 + .../evaluation_test_case_update_params.py | 32 + .../evaluation_test_case_update_response.py | 14 + .../regions/evaluation_runs/__init__.py | 1 + .../regions/evaluation_runs/test_results.py | 200 ++++++ .../regions/test_evaluation_datasets.py | 211 ++++++ .../regions/test_evaluation_runs.py | 187 ++++++ .../regions/test_evaluation_test_cases.py | 486 ++++++++++++++ tests/api_resources/test_regions.py | 58 +- 41 files changed, 3390 insertions(+), 9 deletions(-) create mode 100644 src/gradientai/resources/regions/evaluation_datasets.py create mode 100644 src/gradientai/resources/regions/evaluation_runs/__init__.py create mode 100644 src/gradientai/resources/regions/evaluation_runs/evaluation_runs.py create mode 100644 src/gradientai/resources/regions/evaluation_runs/results.py create mode 100644 src/gradientai/resources/regions/evaluation_test_cases.py create mode 100644 src/gradientai/types/region_list_evaluation_metrics_response.py create mode 100644 src/gradientai/types/regions/api_star_metric_param.py create mode 100644 src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py create mode 100644 src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py create mode 100644 src/gradientai/types/regions/evaluation_dataset_create_params.py create mode 100644 src/gradientai/types/regions/evaluation_dataset_create_response.py create mode 100644 
src/gradientai/types/regions/evaluation_run_create_params.py create mode 100644 src/gradientai/types/regions/evaluation_run_create_response.py create mode 100644 src/gradientai/types/regions/evaluation_run_retrieve_response.py create mode 100644 src/gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py create mode 100644 src/gradientai/types/regions/evaluation_runs/api_evaluation_run.py create mode 100644 src/gradientai/types/regions/evaluation_runs/api_prompt.py create mode 100644 src/gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py create mode 100644 src/gradientai/types/regions/evaluation_runs/result_retrieve_response.py create mode 100644 src/gradientai/types/regions/evaluation_test_case_create_params.py create mode 100644 src/gradientai/types/regions/evaluation_test_case_create_response.py create mode 100644 src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py create mode 100644 src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py create mode 100644 src/gradientai/types/regions/evaluation_test_case_list_response.py create mode 100644 src/gradientai/types/regions/evaluation_test_case_retrieve_response.py create mode 100644 src/gradientai/types/regions/evaluation_test_case_update_params.py create mode 100644 src/gradientai/types/regions/evaluation_test_case_update_response.py create mode 100644 tests/api_resources/regions/evaluation_runs/__init__.py create mode 100644 tests/api_resources/regions/evaluation_runs/test_results.py create mode 100644 tests/api_resources/regions/test_evaluation_datasets.py create mode 100644 tests/api_resources/regions/test_evaluation_runs.py create mode 100644 tests/api_resources/regions/test_evaluation_test_cases.py diff --git a/.stats.yml b/.stats.yml index 611b679c..f0863f5f 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 58 +configured_endpoints: 70 openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: ae932f39d93e617d3f513271503efbcf +config_hash: 211ece2994c6ac52f84f78ee56c1097a diff --git a/README.md b/README.md index bd72811f..09b1e15d 100644 --- a/README.md +++ b/README.md @@ -120,11 +120,10 @@ from gradientai import GradientAI client = GradientAI() -data_source = client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid="knowledge_base_uuid", - aws_data_source={}, +evaluation_test_case = client.regions.evaluation_test_cases.create( + star_metric={}, ) -print(data_source.aws_data_source) +print(evaluation_test_case.star_metric) ``` ## Handling errors diff --git a/api.md b/api.md index d644b609..970f6951 100644 --- a/api.md +++ b/api.md @@ -176,15 +176,31 @@ Methods: Types: ```python -from gradientai.types import APIEvaluationMetric, RegionListResponse +from gradientai.types import ( + APIEvaluationMetric, + RegionListResponse, + RegionListEvaluationMetricsResponse, +) ``` Methods: - client.regions.list(\*\*params) -> RegionListResponse +- client.regions.list_evaluation_metrics() -> RegionListEvaluationMetricsResponse ## EvaluationRuns +Types: + +```python +from gradientai.types.regions import EvaluationRunCreateResponse, EvaluationRunRetrieveResponse +``` + +Methods: + +- client.regions.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse +- client.regions.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse + ### Results Types: @@ -194,17 +210,56 @@ from gradientai.types.regions.evaluation_runs import ( APIEvaluationMetricResult, APIEvaluationRun, APIPrompt, + ResultRetrieveResponse, + ResultRetrievePromptResponse, ) ``` +Methods: + +- client.regions.evaluation_runs.results.retrieve(evaluation_run_uuid) -> ResultRetrieveResponse +- 
client.regions.evaluation_runs.results.retrieve_prompt(prompt_id, \*, evaluation_run_uuid) -> ResultRetrievePromptResponse + ## EvaluationTestCases Types: ```python -from gradientai.types.regions import APIEvaluationTestCase, APIStarMetric +from gradientai.types.regions import ( + APIEvaluationTestCase, + APIStarMetric, + EvaluationTestCaseCreateResponse, + EvaluationTestCaseRetrieveResponse, + EvaluationTestCaseUpdateResponse, + EvaluationTestCaseListResponse, + EvaluationTestCaseListEvaluationRunsResponse, +) ``` +Methods: + +- client.regions.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse +- client.regions.evaluation_test_cases.retrieve(test_case_uuid) -> EvaluationTestCaseRetrieveResponse +- client.regions.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse +- client.regions.evaluation_test_cases.list() -> EvaluationTestCaseListResponse +- client.regions.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse + +## EvaluationDatasets + +Types: + +```python +from gradientai.types.regions import ( + EvaluationDatasetCreateResponse, + EvaluationDatasetCreateFileUploadPresignedURLsResponse, +) +``` + +Methods: + +- client.regions.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse +- client.regions.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse + # IndexingJobs Types: diff --git a/src/gradientai/resources/regions/__init__.py b/src/gradientai/resources/regions/__init__.py index fb9cf834..51a96d61 100644 --- a/src/gradientai/resources/regions/__init__.py +++ b/src/gradientai/resources/regions/__init__.py @@ -8,8 +8,50 @@ RegionsResourceWithStreamingResponse, AsyncRegionsResourceWithStreamingResponse, ) +from .evaluation_runs import ( + EvaluationRunsResource, + AsyncEvaluationRunsResource, + 
EvaluationRunsResourceWithRawResponse, + AsyncEvaluationRunsResourceWithRawResponse, + EvaluationRunsResourceWithStreamingResponse, + AsyncEvaluationRunsResourceWithStreamingResponse, +) +from .evaluation_datasets import ( + EvaluationDatasetsResource, + AsyncEvaluationDatasetsResource, + EvaluationDatasetsResourceWithRawResponse, + AsyncEvaluationDatasetsResourceWithRawResponse, + EvaluationDatasetsResourceWithStreamingResponse, + AsyncEvaluationDatasetsResourceWithStreamingResponse, +) +from .evaluation_test_cases import ( + EvaluationTestCasesResource, + AsyncEvaluationTestCasesResource, + EvaluationTestCasesResourceWithRawResponse, + AsyncEvaluationTestCasesResourceWithRawResponse, + EvaluationTestCasesResourceWithStreamingResponse, + AsyncEvaluationTestCasesResourceWithStreamingResponse, +) __all__ = [ + "EvaluationRunsResource", + "AsyncEvaluationRunsResource", + "EvaluationRunsResourceWithRawResponse", + "AsyncEvaluationRunsResourceWithRawResponse", + "EvaluationRunsResourceWithStreamingResponse", + "AsyncEvaluationRunsResourceWithStreamingResponse", + "EvaluationTestCasesResource", + "AsyncEvaluationTestCasesResource", + "EvaluationTestCasesResourceWithRawResponse", + "AsyncEvaluationTestCasesResourceWithRawResponse", + "EvaluationTestCasesResourceWithStreamingResponse", + "AsyncEvaluationTestCasesResourceWithStreamingResponse", + "EvaluationDatasetsResource", + "AsyncEvaluationDatasetsResource", + "EvaluationDatasetsResourceWithRawResponse", + "AsyncEvaluationDatasetsResourceWithRawResponse", + "EvaluationDatasetsResourceWithStreamingResponse", + "AsyncEvaluationDatasetsResourceWithStreamingResponse", "RegionsResource", "AsyncRegionsResource", "RegionsResourceWithRawResponse", diff --git a/src/gradientai/resources/regions/evaluation_datasets.py b/src/gradientai/resources/regions/evaluation_datasets.py new file mode 100644 index 00000000..f82e9701 --- /dev/null +++ b/src/gradientai/resources/regions/evaluation_datasets.py @@ -0,0 +1,292 @@ +# File generated 
from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.regions import ( + evaluation_dataset_create_params, + evaluation_dataset_create_file_upload_presigned_urls_params, +) +from ...types.regions.evaluation_dataset_create_response import EvaluationDatasetCreateResponse +from ...types.knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam +from ...types.regions.evaluation_dataset_create_file_upload_presigned_urls_response import ( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, +) + +__all__ = ["EvaluationDatasetsResource", "AsyncEvaluationDatasetsResource"] + + +class EvaluationDatasetsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> EvaluationDatasetsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return EvaluationDatasetsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> EvaluationDatasetsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return EvaluationDatasetsResourceWithStreamingResponse(self) + + def create( + self, + *, + file_upload_dataset: APIFileUploadDataSourceParam | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationDatasetCreateResponse: + """ + To create an evaluation dataset, send a POST request to + `/v2/gen-ai/evaluation_datasets`. + + Args: + file_upload_dataset: File to upload as data source for knowledge base. + + name: The name of the agent evaluation dataset. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/evaluation_datasets" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_datasets", + body=maybe_transform( + { + "file_upload_dataset": file_upload_dataset, + "name": name, + }, + evaluation_dataset_create_params.EvaluationDatasetCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationDatasetCreateResponse, + ) + + def create_file_upload_presigned_urls( + self, + *, + files: Iterable[evaluation_dataset_create_file_upload_presigned_urls_params.File] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to 
the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse: + """ + To create presigned URLs for evaluation dataset file upload, send a POST request + to `/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls`. + + Args: + files: A list of files to generate presigned URLs for. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls", + body=maybe_transform( + {"files": files}, + evaluation_dataset_create_file_upload_presigned_urls_params.EvaluationDatasetCreateFileUploadPresignedURLsParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationDatasetCreateFileUploadPresignedURLsResponse, + ) + + +class AsyncEvaluationDatasetsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncEvaluationDatasetsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncEvaluationDatasetsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncEvaluationDatasetsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncEvaluationDatasetsResourceWithStreamingResponse(self) + + async def create( + self, + *, + file_upload_dataset: APIFileUploadDataSourceParam | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationDatasetCreateResponse: + """ + To create an evaluation dataset, send a POST request to + `/v2/gen-ai/evaluation_datasets`. + + Args: + file_upload_dataset: File to upload as data source for knowledge base. + + name: The name of the agent evaluation dataset. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/evaluation_datasets" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_datasets", + body=await async_maybe_transform( + { + "file_upload_dataset": file_upload_dataset, + "name": name, + }, + evaluation_dataset_create_params.EvaluationDatasetCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationDatasetCreateResponse, + ) + + async def create_file_upload_presigned_urls( + self, + *, + files: Iterable[evaluation_dataset_create_file_upload_presigned_urls_params.File] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse: + """ + To create presigned URLs for evaluation dataset file upload, send a POST request + to `/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls`. + + Args: + files: A list of files to generate presigned URLs for. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_datasets/file_upload_presigned_urls", + body=await async_maybe_transform( + {"files": files}, + evaluation_dataset_create_file_upload_presigned_urls_params.EvaluationDatasetCreateFileUploadPresignedURLsParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationDatasetCreateFileUploadPresignedURLsResponse, + ) + + +class EvaluationDatasetsResourceWithRawResponse: + def __init__(self, evaluation_datasets: EvaluationDatasetsResource) -> None: + self._evaluation_datasets = evaluation_datasets + + self.create = to_raw_response_wrapper( + evaluation_datasets.create, + ) + self.create_file_upload_presigned_urls = to_raw_response_wrapper( + evaluation_datasets.create_file_upload_presigned_urls, + ) + + +class AsyncEvaluationDatasetsResourceWithRawResponse: + def __init__(self, evaluation_datasets: AsyncEvaluationDatasetsResource) -> None: + self._evaluation_datasets = evaluation_datasets + + self.create = async_to_raw_response_wrapper( + evaluation_datasets.create, + ) + self.create_file_upload_presigned_urls = async_to_raw_response_wrapper( + evaluation_datasets.create_file_upload_presigned_urls, + ) + + +class EvaluationDatasetsResourceWithStreamingResponse: + def __init__(self, evaluation_datasets: EvaluationDatasetsResource) -> None: + self._evaluation_datasets = evaluation_datasets + + self.create = to_streamed_response_wrapper( + evaluation_datasets.create, + ) + self.create_file_upload_presigned_urls = 
to_streamed_response_wrapper( + evaluation_datasets.create_file_upload_presigned_urls, + ) + + +class AsyncEvaluationDatasetsResourceWithStreamingResponse: + def __init__(self, evaluation_datasets: AsyncEvaluationDatasetsResource) -> None: + self._evaluation_datasets = evaluation_datasets + + self.create = async_to_streamed_response_wrapper( + evaluation_datasets.create, + ) + self.create_file_upload_presigned_urls = async_to_streamed_response_wrapper( + evaluation_datasets.create_file_upload_presigned_urls, + ) diff --git a/src/gradientai/resources/regions/evaluation_runs/__init__.py b/src/gradientai/resources/regions/evaluation_runs/__init__.py new file mode 100644 index 00000000..e5580dd0 --- /dev/null +++ b/src/gradientai/resources/regions/evaluation_runs/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .results import ( + ResultsResource, + AsyncResultsResource, + ResultsResourceWithRawResponse, + AsyncResultsResourceWithRawResponse, + ResultsResourceWithStreamingResponse, + AsyncResultsResourceWithStreamingResponse, +) +from .evaluation_runs import ( + EvaluationRunsResource, + AsyncEvaluationRunsResource, + EvaluationRunsResourceWithRawResponse, + AsyncEvaluationRunsResourceWithRawResponse, + EvaluationRunsResourceWithStreamingResponse, + AsyncEvaluationRunsResourceWithStreamingResponse, +) + +__all__ = [ + "ResultsResource", + "AsyncResultsResource", + "ResultsResourceWithRawResponse", + "AsyncResultsResourceWithRawResponse", + "ResultsResourceWithStreamingResponse", + "AsyncResultsResourceWithStreamingResponse", + "EvaluationRunsResource", + "AsyncEvaluationRunsResource", + "EvaluationRunsResourceWithRawResponse", + "AsyncEvaluationRunsResourceWithRawResponse", + "EvaluationRunsResourceWithStreamingResponse", + "AsyncEvaluationRunsResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/regions/evaluation_runs/evaluation_runs.py 
b/src/gradientai/resources/regions/evaluation_runs/evaluation_runs.py new file mode 100644 index 00000000..9221c45c --- /dev/null +++ b/src/gradientai/resources/regions/evaluation_runs/evaluation_runs.py @@ -0,0 +1,316 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from .results import ( + ResultsResource, + AsyncResultsResource, + ResultsResourceWithRawResponse, + AsyncResultsResourceWithRawResponse, + ResultsResourceWithStreamingResponse, + AsyncResultsResourceWithStreamingResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.regions import evaluation_run_create_params +from ....types.regions.evaluation_run_create_response import EvaluationRunCreateResponse +from ....types.regions.evaluation_run_retrieve_response import EvaluationRunRetrieveResponse + +__all__ = ["EvaluationRunsResource", "AsyncEvaluationRunsResource"] + + +class EvaluationRunsResource(SyncAPIResource): + @cached_property + def results(self) -> ResultsResource: + return ResultsResource(self._client) + + @cached_property + def with_raw_response(self) -> EvaluationRunsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return EvaluationRunsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> EvaluationRunsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return EvaluationRunsResourceWithStreamingResponse(self) + + def create( + self, + *, + agent_uuid: str | NotGiven = NOT_GIVEN, + run_name: str | NotGiven = NOT_GIVEN, + test_case_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationRunCreateResponse: + """ + To run an evaluation test case, send a POST request to + `/v2/gen-ai/evaluation_runs`. + + Args: + agent_uuid: Agent UUID to run the test case against. + + run_name: The name of the run. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/evaluation_runs" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_runs", + body=maybe_transform( + { + "agent_uuid": agent_uuid, + "run_name": run_name, + "test_case_uuid": test_case_uuid, + }, + evaluation_run_create_params.EvaluationRunCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationRunCreateResponse, + ) + + def retrieve( + self, + evaluation_run_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationRunRetrieveResponse: + """ + To retrive information about an existing evaluation run, send a GET request to + `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not evaluation_run_uuid: + raise ValueError( + f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}" + ) + return self._get( + f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationRunRetrieveResponse, + ) + + +class AsyncEvaluationRunsResource(AsyncAPIResource): + @cached_property + def results(self) -> AsyncResultsResource: + return AsyncResultsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncEvaluationRunsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncEvaluationRunsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncEvaluationRunsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncEvaluationRunsResourceWithStreamingResponse(self) + + async def create( + self, + *, + agent_uuid: str | NotGiven = NOT_GIVEN, + run_name: str | NotGiven = NOT_GIVEN, + test_case_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationRunCreateResponse: + """ + To run an evaluation test case, send a POST request to + `/v2/gen-ai/evaluation_runs`. + + Args: + agent_uuid: Agent UUID to run the test case against. + + run_name: The name of the run. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/evaluation_runs" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_runs", + body=await async_maybe_transform( + { + "agent_uuid": agent_uuid, + "run_name": run_name, + "test_case_uuid": test_case_uuid, + }, + evaluation_run_create_params.EvaluationRunCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationRunCreateResponse, + ) + + async def retrieve( + self, + evaluation_run_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationRunRetrieveResponse: + """ + To retrive information about an existing evaluation run, send a GET request to + `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not evaluation_run_uuid: + raise ValueError( + f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}" + ) + return await self._get( + f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationRunRetrieveResponse, + ) + + +class EvaluationRunsResourceWithRawResponse: + def __init__(self, evaluation_runs: EvaluationRunsResource) -> None: + self._evaluation_runs = evaluation_runs + + self.create = to_raw_response_wrapper( + evaluation_runs.create, + ) + self.retrieve = to_raw_response_wrapper( + evaluation_runs.retrieve, + ) + + @cached_property + def results(self) -> ResultsResourceWithRawResponse: + return ResultsResourceWithRawResponse(self._evaluation_runs.results) + + +class AsyncEvaluationRunsResourceWithRawResponse: + def __init__(self, evaluation_runs: AsyncEvaluationRunsResource) -> None: + self._evaluation_runs = evaluation_runs + + self.create = async_to_raw_response_wrapper( + evaluation_runs.create, + ) + self.retrieve = 
async_to_raw_response_wrapper( + evaluation_runs.retrieve, + ) + + @cached_property + def results(self) -> AsyncResultsResourceWithRawResponse: + return AsyncResultsResourceWithRawResponse(self._evaluation_runs.results) + + +class EvaluationRunsResourceWithStreamingResponse: + def __init__(self, evaluation_runs: EvaluationRunsResource) -> None: + self._evaluation_runs = evaluation_runs + + self.create = to_streamed_response_wrapper( + evaluation_runs.create, + ) + self.retrieve = to_streamed_response_wrapper( + evaluation_runs.retrieve, + ) + + @cached_property + def results(self) -> ResultsResourceWithStreamingResponse: + return ResultsResourceWithStreamingResponse(self._evaluation_runs.results) + + +class AsyncEvaluationRunsResourceWithStreamingResponse: + def __init__(self, evaluation_runs: AsyncEvaluationRunsResource) -> None: + self._evaluation_runs = evaluation_runs + + self.create = async_to_streamed_response_wrapper( + evaluation_runs.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + evaluation_runs.retrieve, + ) + + @cached_property + def results(self) -> AsyncResultsResourceWithStreamingResponse: + return AsyncResultsResourceWithStreamingResponse(self._evaluation_runs.results) diff --git a/src/gradientai/resources/regions/evaluation_runs/results.py b/src/gradientai/resources/regions/evaluation_runs/results.py new file mode 100644 index 00000000..ad74a778 --- /dev/null +++ b/src/gradientai/resources/regions/evaluation_runs/results.py @@ -0,0 +1,264 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.regions.evaluation_runs.result_retrieve_response import ResultRetrieveResponse +from ....types.regions.evaluation_runs.result_retrieve_prompt_response import ResultRetrievePromptResponse + +__all__ = ["ResultsResource", "AsyncResultsResource"] + + +class ResultsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ResultsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ResultsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ResultsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ResultsResourceWithStreamingResponse(self) + + def retrieve( + self, + evaluation_run_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ResultRetrieveResponse: + """ + To retrieve results of an evaluation run, send a GET request to + `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not evaluation_run_uuid: + raise ValueError( + f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}" + ) + return self._get( + f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ResultRetrieveResponse, + ) + + def retrieve_prompt( + self, + prompt_id: int, + *, + evaluation_run_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ResultRetrievePromptResponse: + """ + To retrieve results of an evaluation run, send a GET request to + `/v2/genai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not evaluation_run_uuid: + raise ValueError( + f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}" + ) + return self._get( + f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ResultRetrievePromptResponse, + ) + + +class AsyncResultsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncResultsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncResultsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncResultsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncResultsResourceWithStreamingResponse(self) + + async def retrieve( + self, + evaluation_run_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ResultRetrieveResponse: + """ + To retrieve results of an evaluation run, send a GET request to + `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not evaluation_run_uuid: + raise ValueError( + f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}" + ) + return await self._get( + f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ResultRetrieveResponse, + ) + + async def retrieve_prompt( + self, + prompt_id: int, + *, + evaluation_run_uuid: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ResultRetrievePromptResponse: + """ + To retrieve results of an evaluation run, send a GET request to + `/v2/genai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not evaluation_run_uuid: + raise ValueError( + f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}" + ) + return await self._get( + f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ResultRetrievePromptResponse, + ) + + +class ResultsResourceWithRawResponse: + def __init__(self, results: ResultsResource) -> None: + self._results = results + + self.retrieve = to_raw_response_wrapper( + results.retrieve, + ) + self.retrieve_prompt = to_raw_response_wrapper( + results.retrieve_prompt, + ) + + +class AsyncResultsResourceWithRawResponse: + def __init__(self, results: AsyncResultsResource) -> None: + self._results = results + + self.retrieve = async_to_raw_response_wrapper( + results.retrieve, + ) + self.retrieve_prompt = async_to_raw_response_wrapper( + results.retrieve_prompt, + ) + + +class ResultsResourceWithStreamingResponse: + def __init__(self, results: ResultsResource) -> None: + self._results = results + + self.retrieve = to_streamed_response_wrapper( + results.retrieve, + ) + self.retrieve_prompt = to_streamed_response_wrapper( + results.retrieve_prompt, + ) + + +class AsyncResultsResourceWithStreamingResponse: + def __init__(self, results: AsyncResultsResource) -> None: + self._results = results + + self.retrieve = async_to_streamed_response_wrapper( + results.retrieve, + ) + self.retrieve_prompt = async_to_streamed_response_wrapper( + 
results.retrieve_prompt, + ) diff --git a/src/gradientai/resources/regions/evaluation_test_cases.py b/src/gradientai/resources/regions/evaluation_test_cases.py new file mode 100644 index 00000000..eed4d8b4 --- /dev/null +++ b/src/gradientai/resources/regions/evaluation_test_cases.py @@ -0,0 +1,618 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.regions import ( + evaluation_test_case_create_params, + evaluation_test_case_update_params, + evaluation_test_case_list_evaluation_runs_params, +) +from ...types.regions.api_star_metric_param import APIStarMetricParam +from ...types.regions.evaluation_test_case_list_response import EvaluationTestCaseListResponse +from ...types.regions.evaluation_test_case_create_response import EvaluationTestCaseCreateResponse +from ...types.regions.evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse +from ...types.regions.evaluation_test_case_retrieve_response import EvaluationTestCaseRetrieveResponse +from ...types.regions.evaluation_test_case_list_evaluation_runs_response import ( + EvaluationTestCaseListEvaluationRunsResponse, +) + +__all__ = ["EvaluationTestCasesResource", "AsyncEvaluationTestCasesResource"] + + +class EvaluationTestCasesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> EvaluationTestCasesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the 
raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return EvaluationTestCasesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> EvaluationTestCasesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return EvaluationTestCasesResourceWithStreamingResponse(self) + + def create( + self, + *, + dataset_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + metrics: List[str] | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + star_metric: APIStarMetricParam | NotGiven = NOT_GIVEN, + workspace_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationTestCaseCreateResponse: + """ + To create an evaluation test-case send a POST request to + `/v2/gen-ai/evaluation_test_cases`. + + Args: + dataset_uuid: Dataset against which the test‑case is executed. + + description: Description of the test case. + + metrics: Full metric list to use for evaluation test case. + + name: Name of the test case. + + workspace_uuid: The workspace uuid. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/gen-ai/evaluation_test_cases" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases", + body=maybe_transform( + { + "dataset_uuid": dataset_uuid, + "description": description, + "metrics": metrics, + "name": name, + "star_metric": star_metric, + "workspace_uuid": workspace_uuid, + }, + evaluation_test_case_create_params.EvaluationTestCaseCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationTestCaseCreateResponse, + ) + + def retrieve( + self, + test_case_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationTestCaseRetrieveResponse: + """ + To retrive information about an existing evaluation test case, send a GET + request to `/v2/gen-ai/evaluation_test_case/{test_case_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not test_case_uuid: + raise ValueError(f"Expected a non-empty value for `test_case_uuid` but received {test_case_uuid!r}") + return self._get( + f"/v2/gen-ai/evaluation_test_cases/{test_case_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{test_case_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationTestCaseRetrieveResponse, + ) + + def update( + self, + path_test_case_uuid: str, + *, + dataset_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + metrics: evaluation_test_case_update_params.Metrics | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + star_metric: APIStarMetricParam | NotGiven = NOT_GIVEN, + body_test_case_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationTestCaseUpdateResponse: + """ + To update an evaluation test-case send a POST request to + `/v2/gen-ai/evaluation_test_cases/{test_case_uuid}`. + + Args: + dataset_uuid: Dataset against which the test‑case is executed. + + description: Description of the test case. + + name: Name of the test case. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_test_case_uuid: + raise ValueError( + f"Expected a non-empty value for `path_test_case_uuid` but received {path_test_case_uuid!r}" + ) + return self._post( + f"/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}", + body=maybe_transform( + { + "dataset_uuid": dataset_uuid, + "description": description, + "metrics": metrics, + "name": name, + "star_metric": star_metric, + "body_test_case_uuid": body_test_case_uuid, + }, + evaluation_test_case_update_params.EvaluationTestCaseUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationTestCaseUpdateResponse, + ) + + def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationTestCaseListResponse: + """ + To list all evaluation test cases, send a GET request to + `/v2/gen-ai/evaluation_test_cases`. 
+ """ + return self._get( + "/v2/gen-ai/evaluation_test_cases" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationTestCaseListResponse, + ) + + def list_evaluation_runs( + self, + evaluation_test_case_uuid: str, + *, + evaluation_test_case_version: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationTestCaseListEvaluationRunsResponse: + """ + To list all evaluation runs by test case, send a GET request to + `/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs`. + + Args: + evaluation_test_case_version: Version of the test case. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not evaluation_test_case_uuid: + raise ValueError( + f"Expected a non-empty value for `evaluation_test_case_uuid` but received {evaluation_test_case_uuid!r}" + ) + return self._get( + f"/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + {"evaluation_test_case_version": evaluation_test_case_version}, + evaluation_test_case_list_evaluation_runs_params.EvaluationTestCaseListEvaluationRunsParams, + ), + ), + cast_to=EvaluationTestCaseListEvaluationRunsResponse, + ) + + +class AsyncEvaluationTestCasesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncEvaluationTestCasesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncEvaluationTestCasesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncEvaluationTestCasesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncEvaluationTestCasesResourceWithStreamingResponse(self) + + async def create( + self, + *, + dataset_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + metrics: List[str] | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + star_metric: APIStarMetricParam | NotGiven = NOT_GIVEN, + workspace_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationTestCaseCreateResponse: + """ + To create an evaluation test-case send a POST request to + `/v2/gen-ai/evaluation_test_cases`. + + Args: + dataset_uuid: Dataset against which the test‑case is executed. + + description: Description of the test case. + + metrics: Full metric list to use for evaluation test case. + + name: Name of the test case. + + workspace_uuid: The workspace uuid. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/gen-ai/evaluation_test_cases" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases", + body=await async_maybe_transform( + { + "dataset_uuid": dataset_uuid, + "description": description, + "metrics": metrics, + "name": name, + "star_metric": star_metric, + "workspace_uuid": workspace_uuid, + }, + evaluation_test_case_create_params.EvaluationTestCaseCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationTestCaseCreateResponse, + ) + + async def retrieve( + self, + test_case_uuid: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationTestCaseRetrieveResponse: + """ + To retrive information about an existing evaluation test case, send a GET + request to `/v2/gen-ai/evaluation_test_case/{test_case_uuid}`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not test_case_uuid: + raise ValueError(f"Expected a non-empty value for `test_case_uuid` but received {test_case_uuid!r}") + return await self._get( + f"/v2/gen-ai/evaluation_test_cases/{test_case_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{test_case_uuid}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationTestCaseRetrieveResponse, + ) + + async def update( + self, + path_test_case_uuid: str, + *, + dataset_uuid: str | NotGiven = NOT_GIVEN, + description: str | NotGiven = NOT_GIVEN, + metrics: evaluation_test_case_update_params.Metrics | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + star_metric: APIStarMetricParam | NotGiven = NOT_GIVEN, + body_test_case_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationTestCaseUpdateResponse: + """ + To update an evaluation test-case send a POST request to + `/v2/gen-ai/evaluation_test_cases/{test_case_uuid}`. + + Args: + dataset_uuid: Dataset against which the test‑case is executed. + + description: Description of the test case. + + name: Name of the test case. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not path_test_case_uuid: + raise ValueError( + f"Expected a non-empty value for `path_test_case_uuid` but received {path_test_case_uuid!r}" + ) + return await self._post( + f"/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}", + body=await async_maybe_transform( + { + "dataset_uuid": dataset_uuid, + "description": description, + "metrics": metrics, + "name": name, + "star_metric": star_metric, + "body_test_case_uuid": body_test_case_uuid, + }, + evaluation_test_case_update_params.EvaluationTestCaseUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationTestCaseUpdateResponse, + ) + + async def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationTestCaseListResponse: + """ + To list all evaluation test cases, send a GET request to + `/v2/gen-ai/evaluation_test_cases`. 
+ """ + return await self._get( + "/v2/gen-ai/evaluation_test_cases" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=EvaluationTestCaseListResponse, + ) + + async def list_evaluation_runs( + self, + evaluation_test_case_uuid: str, + *, + evaluation_test_case_version: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationTestCaseListEvaluationRunsResponse: + """ + To list all evaluation runs by test case, send a GET request to + `/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs`. + + Args: + evaluation_test_case_version: Version of the test case. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not evaluation_test_case_uuid: + raise ValueError( + f"Expected a non-empty value for `evaluation_test_case_uuid` but received {evaluation_test_case_uuid!r}" + ) + return await self._get( + f"/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{evaluation_test_case_uuid}/evaluation_runs", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + {"evaluation_test_case_version": evaluation_test_case_version}, + evaluation_test_case_list_evaluation_runs_params.EvaluationTestCaseListEvaluationRunsParams, + ), + ), + cast_to=EvaluationTestCaseListEvaluationRunsResponse, + ) + + +class EvaluationTestCasesResourceWithRawResponse: + def __init__(self, evaluation_test_cases: EvaluationTestCasesResource) -> None: + self._evaluation_test_cases = evaluation_test_cases + + self.create = to_raw_response_wrapper( + evaluation_test_cases.create, + ) + self.retrieve = to_raw_response_wrapper( + evaluation_test_cases.retrieve, + ) + self.update = to_raw_response_wrapper( + evaluation_test_cases.update, + ) + self.list = to_raw_response_wrapper( + evaluation_test_cases.list, + ) + self.list_evaluation_runs = to_raw_response_wrapper( + evaluation_test_cases.list_evaluation_runs, + ) + + +class AsyncEvaluationTestCasesResourceWithRawResponse: + def __init__(self, evaluation_test_cases: AsyncEvaluationTestCasesResource) -> None: + self._evaluation_test_cases = evaluation_test_cases + + self.create = async_to_raw_response_wrapper( + evaluation_test_cases.create, + ) + self.retrieve = 
async_to_raw_response_wrapper( + evaluation_test_cases.retrieve, + ) + self.update = async_to_raw_response_wrapper( + evaluation_test_cases.update, + ) + self.list = async_to_raw_response_wrapper( + evaluation_test_cases.list, + ) + self.list_evaluation_runs = async_to_raw_response_wrapper( + evaluation_test_cases.list_evaluation_runs, + ) + + +class EvaluationTestCasesResourceWithStreamingResponse: + def __init__(self, evaluation_test_cases: EvaluationTestCasesResource) -> None: + self._evaluation_test_cases = evaluation_test_cases + + self.create = to_streamed_response_wrapper( + evaluation_test_cases.create, + ) + self.retrieve = to_streamed_response_wrapper( + evaluation_test_cases.retrieve, + ) + self.update = to_streamed_response_wrapper( + evaluation_test_cases.update, + ) + self.list = to_streamed_response_wrapper( + evaluation_test_cases.list, + ) + self.list_evaluation_runs = to_streamed_response_wrapper( + evaluation_test_cases.list_evaluation_runs, + ) + + +class AsyncEvaluationTestCasesResourceWithStreamingResponse: + def __init__(self, evaluation_test_cases: AsyncEvaluationTestCasesResource) -> None: + self._evaluation_test_cases = evaluation_test_cases + + self.create = async_to_streamed_response_wrapper( + evaluation_test_cases.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + evaluation_test_cases.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + evaluation_test_cases.update, + ) + self.list = async_to_streamed_response_wrapper( + evaluation_test_cases.list, + ) + self.list_evaluation_runs = async_to_streamed_response_wrapper( + evaluation_test_cases.list_evaluation_runs, + ) diff --git a/src/gradientai/resources/regions/regions.py b/src/gradientai/resources/regions/regions.py index 6662e80a..5f74b2e8 100644 --- a/src/gradientai/resources/regions/regions.py +++ b/src/gradientai/resources/regions/regions.py @@ -16,12 +16,49 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options 
+from .evaluation_datasets import ( + EvaluationDatasetsResource, + AsyncEvaluationDatasetsResource, + EvaluationDatasetsResourceWithRawResponse, + AsyncEvaluationDatasetsResourceWithRawResponse, + EvaluationDatasetsResourceWithStreamingResponse, + AsyncEvaluationDatasetsResourceWithStreamingResponse, +) +from .evaluation_test_cases import ( + EvaluationTestCasesResource, + AsyncEvaluationTestCasesResource, + EvaluationTestCasesResourceWithRawResponse, + AsyncEvaluationTestCasesResourceWithRawResponse, + EvaluationTestCasesResourceWithStreamingResponse, + AsyncEvaluationTestCasesResourceWithStreamingResponse, +) from ...types.region_list_response import RegionListResponse +from .evaluation_runs.evaluation_runs import ( + EvaluationRunsResource, + AsyncEvaluationRunsResource, + EvaluationRunsResourceWithRawResponse, + AsyncEvaluationRunsResourceWithRawResponse, + EvaluationRunsResourceWithStreamingResponse, + AsyncEvaluationRunsResourceWithStreamingResponse, +) +from ...types.region_list_evaluation_metrics_response import RegionListEvaluationMetricsResponse __all__ = ["RegionsResource", "AsyncRegionsResource"] class RegionsResource(SyncAPIResource): + @cached_property + def evaluation_runs(self) -> EvaluationRunsResource: + return EvaluationRunsResource(self._client) + + @cached_property + def evaluation_test_cases(self) -> EvaluationTestCasesResource: + return EvaluationTestCasesResource(self._client) + + @cached_property + def evaluation_datasets(self) -> EvaluationDatasetsResource: + return EvaluationDatasetsResource(self._client) + @cached_property def with_raw_response(self) -> RegionsResourceWithRawResponse: """ @@ -89,8 +126,44 @@ def list( cast_to=RegionListResponse, ) + def list_evaluation_metrics( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RegionListEvaluationMetricsResponse: + """ + To list all evaluation metrics, send a GET request to + `/v2/gen-ai/evaluation_metrics`. + """ + return self._get( + "/v2/gen-ai/evaluation_metrics" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_metrics", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RegionListEvaluationMetricsResponse, + ) + class AsyncRegionsResource(AsyncAPIResource): + @cached_property + def evaluation_runs(self) -> AsyncEvaluationRunsResource: + return AsyncEvaluationRunsResource(self._client) + + @cached_property + def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResource: + return AsyncEvaluationTestCasesResource(self._client) + + @cached_property + def evaluation_datasets(self) -> AsyncEvaluationDatasetsResource: + return AsyncEvaluationDatasetsResource(self._client) + @cached_property def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse: """ @@ -158,6 +231,30 @@ async def list( cast_to=RegionListResponse, ) + async def list_evaluation_metrics( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> RegionListEvaluationMetricsResponse: + """ + To list all evaluation metrics, send a GET request to + `/v2/gen-ai/evaluation_metrics`. 
+ """ + return await self._get( + "/v2/gen-ai/evaluation_metrics" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/evaluation_metrics", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=RegionListEvaluationMetricsResponse, + ) + class RegionsResourceWithRawResponse: def __init__(self, regions: RegionsResource) -> None: @@ -166,6 +263,21 @@ def __init__(self, regions: RegionsResource) -> None: self.list = to_raw_response_wrapper( regions.list, ) + self.list_evaluation_metrics = to_raw_response_wrapper( + regions.list_evaluation_metrics, + ) + + @cached_property + def evaluation_runs(self) -> EvaluationRunsResourceWithRawResponse: + return EvaluationRunsResourceWithRawResponse(self._regions.evaluation_runs) + + @cached_property + def evaluation_test_cases(self) -> EvaluationTestCasesResourceWithRawResponse: + return EvaluationTestCasesResourceWithRawResponse(self._regions.evaluation_test_cases) + + @cached_property + def evaluation_datasets(self) -> EvaluationDatasetsResourceWithRawResponse: + return EvaluationDatasetsResourceWithRawResponse(self._regions.evaluation_datasets) class AsyncRegionsResourceWithRawResponse: @@ -175,6 +287,21 @@ def __init__(self, regions: AsyncRegionsResource) -> None: self.list = async_to_raw_response_wrapper( regions.list, ) + self.list_evaluation_metrics = async_to_raw_response_wrapper( + regions.list_evaluation_metrics, + ) + + @cached_property + def evaluation_runs(self) -> AsyncEvaluationRunsResourceWithRawResponse: + return AsyncEvaluationRunsResourceWithRawResponse(self._regions.evaluation_runs) + + @cached_property + def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResourceWithRawResponse: + return AsyncEvaluationTestCasesResourceWithRawResponse(self._regions.evaluation_test_cases) + + @cached_property + def evaluation_datasets(self) -> AsyncEvaluationDatasetsResourceWithRawResponse: + return 
AsyncEvaluationDatasetsResourceWithRawResponse(self._regions.evaluation_datasets) class RegionsResourceWithStreamingResponse: @@ -184,6 +311,21 @@ def __init__(self, regions: RegionsResource) -> None: self.list = to_streamed_response_wrapper( regions.list, ) + self.list_evaluation_metrics = to_streamed_response_wrapper( + regions.list_evaluation_metrics, + ) + + @cached_property + def evaluation_runs(self) -> EvaluationRunsResourceWithStreamingResponse: + return EvaluationRunsResourceWithStreamingResponse(self._regions.evaluation_runs) + + @cached_property + def evaluation_test_cases(self) -> EvaluationTestCasesResourceWithStreamingResponse: + return EvaluationTestCasesResourceWithStreamingResponse(self._regions.evaluation_test_cases) + + @cached_property + def evaluation_datasets(self) -> EvaluationDatasetsResourceWithStreamingResponse: + return EvaluationDatasetsResourceWithStreamingResponse(self._regions.evaluation_datasets) class AsyncRegionsResourceWithStreamingResponse: @@ -193,3 +335,18 @@ def __init__(self, regions: AsyncRegionsResource) -> None: self.list = async_to_streamed_response_wrapper( regions.list, ) + self.list_evaluation_metrics = async_to_streamed_response_wrapper( + regions.list_evaluation_metrics, + ) + + @cached_property + def evaluation_runs(self) -> AsyncEvaluationRunsResourceWithStreamingResponse: + return AsyncEvaluationRunsResourceWithStreamingResponse(self._regions.evaluation_runs) + + @cached_property + def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResourceWithStreamingResponse: + return AsyncEvaluationTestCasesResourceWithStreamingResponse(self._regions.evaluation_test_cases) + + @cached_property + def evaluation_datasets(self) -> AsyncEvaluationDatasetsResourceWithStreamingResponse: + return AsyncEvaluationDatasetsResourceWithStreamingResponse(self._regions.evaluation_datasets) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index 22414733..d09aaa2a 100644 --- 
a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -45,6 +45,9 @@ from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse +from .region_list_evaluation_metrics_response import ( + RegionListEvaluationMetricsResponse as RegionListEvaluationMetricsResponse, +) from .indexing_job_retrieve_data_sources_response import ( IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, ) diff --git a/src/gradientai/types/region_list_evaluation_metrics_response.py b/src/gradientai/types/region_list_evaluation_metrics_response.py new file mode 100644 index 00000000..c57b71d1 --- /dev/null +++ b/src/gradientai/types/region_list_evaluation_metrics_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from .._models import BaseModel +from .api_evaluation_metric import APIEvaluationMetric + +__all__ = ["RegionListEvaluationMetricsResponse"] + + +class RegionListEvaluationMetricsResponse(BaseModel): + metrics: Optional[List[APIEvaluationMetric]] = None diff --git a/src/gradientai/types/regions/__init__.py b/src/gradientai/types/regions/__init__.py index 83b21099..695ba3b4 100644 --- a/src/gradientai/types/regions/__init__.py +++ b/src/gradientai/types/regions/__init__.py @@ -3,4 +3,30 @@ from __future__ import annotations from .api_star_metric import APIStarMetric as APIStarMetric +from .api_star_metric_param import APIStarMetricParam as APIStarMetricParam from .api_evaluation_test_case import APIEvaluationTestCase as APIEvaluationTestCase +from .evaluation_run_create_params import EvaluationRunCreateParams as EvaluationRunCreateParams +from .evaluation_run_create_response import EvaluationRunCreateResponse as EvaluationRunCreateResponse +from .evaluation_dataset_create_params import EvaluationDatasetCreateParams as EvaluationDatasetCreateParams +from .evaluation_run_retrieve_response import EvaluationRunRetrieveResponse as EvaluationRunRetrieveResponse +from .evaluation_dataset_create_response import EvaluationDatasetCreateResponse as EvaluationDatasetCreateResponse +from .evaluation_test_case_create_params import EvaluationTestCaseCreateParams as EvaluationTestCaseCreateParams +from .evaluation_test_case_list_response import EvaluationTestCaseListResponse as EvaluationTestCaseListResponse +from .evaluation_test_case_update_params import EvaluationTestCaseUpdateParams as EvaluationTestCaseUpdateParams +from .evaluation_test_case_create_response import EvaluationTestCaseCreateResponse as EvaluationTestCaseCreateResponse +from .evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse as EvaluationTestCaseUpdateResponse +from .evaluation_test_case_retrieve_response import ( + 
EvaluationTestCaseRetrieveResponse as EvaluationTestCaseRetrieveResponse, +) +from .evaluation_test_case_list_evaluation_runs_params import ( + EvaluationTestCaseListEvaluationRunsParams as EvaluationTestCaseListEvaluationRunsParams, +) +from .evaluation_test_case_list_evaluation_runs_response import ( + EvaluationTestCaseListEvaluationRunsResponse as EvaluationTestCaseListEvaluationRunsResponse, +) +from .evaluation_dataset_create_file_upload_presigned_urls_params import ( + EvaluationDatasetCreateFileUploadPresignedURLsParams as EvaluationDatasetCreateFileUploadPresignedURLsParams, +) +from .evaluation_dataset_create_file_upload_presigned_urls_response import ( + EvaluationDatasetCreateFileUploadPresignedURLsResponse as EvaluationDatasetCreateFileUploadPresignedURLsResponse, +) diff --git a/src/gradientai/types/regions/api_star_metric_param.py b/src/gradientai/types/regions/api_star_metric_param.py new file mode 100644 index 00000000..5f7b2fd9 --- /dev/null +++ b/src/gradientai/types/regions/api_star_metric_param.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["APIStarMetricParam"] + + +class APIStarMetricParam(TypedDict, total=False): + metric_uuid: str + + name: str + + success_threshold_pct: int + """ + The success threshold for the star metric. This is a percentage value between 0 + and 100. + """ diff --git a/src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py new file mode 100644 index 00000000..6aa6d27a --- /dev/null +++ b/src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py @@ -0,0 +1,20 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Iterable +from typing_extensions import TypedDict + +__all__ = ["EvaluationDatasetCreateFileUploadPresignedURLsParams", "File"] + + +class EvaluationDatasetCreateFileUploadPresignedURLsParams(TypedDict, total=False): + files: Iterable[File] + """A list of files to generate presigned URLs for.""" + + +class File(TypedDict, total=False): + file_name: str + + file_size: str + """The size of the file in bytes.""" diff --git a/src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py b/src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py new file mode 100644 index 00000000..bee94c93 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime + +from ..._models import BaseModel + +__all__ = ["EvaluationDatasetCreateFileUploadPresignedURLsResponse", "Upload"] + + +class Upload(BaseModel): + expires_at: Optional[datetime] = None + """The time the url expires at.""" + + object_key: Optional[str] = None + """The unique object key to store the file as.""" + + original_file_name: Optional[str] = None + """The original file name.""" + + presigned_url: Optional[str] = None + """The actual presigned URL the client can use to upload the file directly.""" + + +class EvaluationDatasetCreateFileUploadPresignedURLsResponse(BaseModel): + request_id: Optional[str] = None + """The ID generated for the request for Presigned URLs.""" + + uploads: Optional[List[Upload]] = None + """A list of generated presigned URLs and object keys, one per file.""" diff --git a/src/gradientai/types/regions/evaluation_dataset_create_params.py b/src/gradientai/types/regions/evaluation_dataset_create_params.py new file mode 100644 index 
00000000..c8a84c23 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_dataset_create_params.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +from ..knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam + +__all__ = ["EvaluationDatasetCreateParams"] + + +class EvaluationDatasetCreateParams(TypedDict, total=False): + file_upload_dataset: APIFileUploadDataSourceParam + """File to upload as data source for knowledge base.""" + + name: str + """The name of the agent evaluation dataset.""" diff --git a/src/gradientai/types/regions/evaluation_dataset_create_response.py b/src/gradientai/types/regions/evaluation_dataset_create_response.py new file mode 100644 index 00000000..f5c7fbac --- /dev/null +++ b/src/gradientai/types/regions/evaluation_dataset_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["EvaluationDatasetCreateResponse"] + + +class EvaluationDatasetCreateResponse(BaseModel): + evaluation_dataset_uuid: Optional[str] = None + """Evaluation dataset uuid.""" diff --git a/src/gradientai/types/regions/evaluation_run_create_params.py b/src/gradientai/types/regions/evaluation_run_create_params.py new file mode 100644 index 00000000..1ae2dbbb --- /dev/null +++ b/src/gradientai/types/regions/evaluation_run_create_params.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["EvaluationRunCreateParams"] + + +class EvaluationRunCreateParams(TypedDict, total=False): + agent_uuid: str + """Agent UUID to run the test case against.""" + + run_name: str + """The name of the run.""" + + test_case_uuid: str diff --git a/src/gradientai/types/regions/evaluation_run_create_response.py b/src/gradientai/types/regions/evaluation_run_create_response.py new file mode 100644 index 00000000..36942c29 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_run_create_response.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["EvaluationRunCreateResponse"] + + +class EvaluationRunCreateResponse(BaseModel): + evaluation_run_uuid: Optional[str] = None diff --git a/src/gradientai/types/regions/evaluation_run_retrieve_response.py b/src/gradientai/types/regions/evaluation_run_retrieve_response.py new file mode 100644 index 00000000..68d71978 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_run_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .evaluation_runs.api_evaluation_run import APIEvaluationRun + +__all__ = ["EvaluationRunRetrieveResponse"] + + +class EvaluationRunRetrieveResponse(BaseModel): + evaluation_run: Optional[APIEvaluationRun] = None diff --git a/src/gradientai/types/regions/evaluation_runs/__init__.py b/src/gradientai/types/regions/evaluation_runs/__init__.py index f8ee8b14..0ec4f2f6 100644 --- a/src/gradientai/types/regions/evaluation_runs/__init__.py +++ b/src/gradientai/types/regions/evaluation_runs/__init__.py @@ -1,3 +1,9 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from __future__ import annotations + +from .api_prompt import APIPrompt as APIPrompt +from .api_evaluation_run import APIEvaluationRun as APIEvaluationRun +from .result_retrieve_response import ResultRetrieveResponse as ResultRetrieveResponse +from .api_evaluation_metric_result import APIEvaluationMetricResult as APIEvaluationMetricResult +from .result_retrieve_prompt_response import ResultRetrievePromptResponse as ResultRetrievePromptResponse diff --git a/src/gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py b/src/gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py new file mode 100644 index 00000000..cb50fd80 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["APIEvaluationMetricResult"] + + +class APIEvaluationMetricResult(BaseModel): + metric_name: Optional[str] = None + + number_value: Optional[float] = None + """The value of the metric as a number.""" + + string_value: Optional[str] = None + """The value of the metric as a string.""" diff --git a/src/gradientai/types/regions/evaluation_runs/api_evaluation_run.py b/src/gradientai/types/regions/evaluation_runs/api_evaluation_run.py new file mode 100644 index 00000000..7822f53c --- /dev/null +++ b/src/gradientai/types/regions/evaluation_runs/api_evaluation_run.py @@ -0,0 +1,56 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ...._models import BaseModel +from .api_evaluation_metric_result import APIEvaluationMetricResult + +__all__ = ["APIEvaluationRun"] + + +class APIEvaluationRun(BaseModel): + agent_uuid: Optional[str] = None + """Agent UUID.""" + + agent_version_hash: Optional[str] = None + + evaluation_run_uuid: Optional[str] = None + """Evaluation run UUID.""" + + finished_at: Optional[datetime] = None + """Run end time.""" + + pass_status: Optional[bool] = None + """The pass status of the evaluation run based on the star metric.""" + + run_level_metric_results: Optional[List[APIEvaluationMetricResult]] = None + + run_name: Optional[str] = None + """Run name.""" + + star_metric_result: Optional[APIEvaluationMetricResult] = None + + started_at: Optional[datetime] = None + """Run start time.""" + + status: Optional[ + Literal[ + "EVALUATION_RUN_STATUS_UNSPECIFIED", + "EVALUATION_RUN_QUEUED", + "EVALUATION_RUN_RUNNING_DATASET", + "EVALUATION_RUN_EVALUATING_RESULTS", + "EVALUATION_RUN_CANCELLING", + "EVALUATION_RUN_CANCELLED", + "EVALUATION_RUN_SUCCESSFUL", + "EVALUATION_RUN_PARTIALLY_SUCCESSFUL", + "EVALUATION_RUN_FAILED", + ] + ] = None + + test_case_uuid: Optional[str] = None + """Test-case UUID.""" + + test_case_version: Optional[int] = None + """Test-case-version.""" diff --git a/src/gradientai/types/regions/evaluation_runs/api_prompt.py b/src/gradientai/types/regions/evaluation_runs/api_prompt.py new file mode 100644 index 00000000..fb5a51f4 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_runs/api_prompt.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from ...._models import BaseModel +from .api_evaluation_metric_result import APIEvaluationMetricResult + +__all__ = ["APIPrompt", "PromptChunk"] + + +class PromptChunk(BaseModel): + chunk_usage_pct: Optional[float] = None + """The usage percentage of the chunk.""" + + chunk_used: Optional[bool] = None + """Indicates if the chunk was used in the prompt.""" + + index_uuid: Optional[str] = None + """The index uuid (Knowledge Base) of the chunk.""" + + source_name: Optional[str] = None + """The source name for the chunk, e.g., the file name or document title.""" + + text: Optional[str] = None + """Text content of the chunk.""" + + +class APIPrompt(BaseModel): + ground_truth: Optional[str] = None + """The ground truth for the prompt.""" + + input: Optional[str] = None + + output: Optional[str] = None + + prompt_chunks: Optional[List[PromptChunk]] = None + """The list of prompt chunks.""" + + prompt_id: Optional[int] = None + + prompt_level_metric_results: Optional[List[APIEvaluationMetricResult]] = None + """The metric results for the prompt.""" diff --git a/src/gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py b/src/gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py new file mode 100644 index 00000000..ebebee48 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ...._models import BaseModel +from .api_prompt import APIPrompt + +__all__ = ["ResultRetrievePromptResponse"] + + +class ResultRetrievePromptResponse(BaseModel): + prompt: Optional[APIPrompt] = None diff --git a/src/gradientai/types/regions/evaluation_runs/result_retrieve_response.py b/src/gradientai/types/regions/evaluation_runs/result_retrieve_response.py new file mode 100644 index 00000000..27256353 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_runs/result_retrieve_response.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ...._models import BaseModel +from .api_prompt import APIPrompt +from .api_evaluation_run import APIEvaluationRun + +__all__ = ["ResultRetrieveResponse"] + + +class ResultRetrieveResponse(BaseModel): + evaluation_run: Optional[APIEvaluationRun] = None + + prompts: Optional[List[APIPrompt]] = None + """The prompt level results.""" diff --git a/src/gradientai/types/regions/evaluation_test_case_create_params.py b/src/gradientai/types/regions/evaluation_test_case_create_params.py new file mode 100644 index 00000000..51ce20c7 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_test_case_create_params.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List +from typing_extensions import TypedDict + +from .api_star_metric_param import APIStarMetricParam + +__all__ = ["EvaluationTestCaseCreateParams"] + + +class EvaluationTestCaseCreateParams(TypedDict, total=False): + dataset_uuid: str + """Dataset against which the test‑case is executed.""" + + description: str + """Description of the test case.""" + + metrics: List[str] + """Full metric list to use for evaluation test case.""" + + name: str + """Name of the test case.""" + + star_metric: APIStarMetricParam + + workspace_uuid: str + """The workspace uuid.""" diff --git a/src/gradientai/types/regions/evaluation_test_case_create_response.py b/src/gradientai/types/regions/evaluation_test_case_create_response.py new file mode 100644 index 00000000..9f8e37f4 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_test_case_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["EvaluationTestCaseCreateResponse"] + + +class EvaluationTestCaseCreateResponse(BaseModel): + test_case_uuid: Optional[str] = None + """Test‑case UUID.""" diff --git a/src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py b/src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py new file mode 100644 index 00000000..7f30ee28 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["EvaluationTestCaseListEvaluationRunsParams"] + + +class EvaluationTestCaseListEvaluationRunsParams(TypedDict, total=False): + evaluation_test_case_version: int + """Version of the test case.""" diff --git a/src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py b/src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py new file mode 100644 index 00000000..4233d0ec --- /dev/null +++ b/src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from .evaluation_runs.api_evaluation_run import APIEvaluationRun + +__all__ = ["EvaluationTestCaseListEvaluationRunsResponse"] + + +class EvaluationTestCaseListEvaluationRunsResponse(BaseModel): + evaluation_runs: Optional[List[APIEvaluationRun]] = None + """List of evaluation runs.""" diff --git a/src/gradientai/types/regions/evaluation_test_case_list_response.py b/src/gradientai/types/regions/evaluation_test_case_list_response.py new file mode 100644 index 00000000..ccfc263e --- /dev/null +++ b/src/gradientai/types/regions/evaluation_test_case_list_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from ..._models import BaseModel +from .api_evaluation_test_case import APIEvaluationTestCase + +__all__ = ["EvaluationTestCaseListResponse"] + + +class EvaluationTestCaseListResponse(BaseModel): + evaluation_test_cases: Optional[List[APIEvaluationTestCase]] = None diff --git a/src/gradientai/types/regions/evaluation_test_case_retrieve_response.py b/src/gradientai/types/regions/evaluation_test_case_retrieve_response.py new file mode 100644 index 00000000..1511ba74 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_test_case_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .api_evaluation_test_case import APIEvaluationTestCase + +__all__ = ["EvaluationTestCaseRetrieveResponse"] + + +class EvaluationTestCaseRetrieveResponse(BaseModel): + evaluation_test_case: Optional[APIEvaluationTestCase] = None diff --git a/src/gradientai/types/regions/evaluation_test_case_update_params.py b/src/gradientai/types/regions/evaluation_test_case_update_params.py new file mode 100644 index 00000000..be70fc95 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_test_case_update_params.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List +from typing_extensions import Annotated, TypedDict + +from ..._utils import PropertyInfo +from .api_star_metric_param import APIStarMetricParam + +__all__ = ["EvaluationTestCaseUpdateParams", "Metrics"] + + +class EvaluationTestCaseUpdateParams(TypedDict, total=False): + dataset_uuid: str + """Dataset against which the test‑case is executed.""" + + description: str + """Description of the test case.""" + + metrics: Metrics + + name: str + """Name of the test case.""" + + star_metric: APIStarMetricParam + + body_test_case_uuid: Annotated[str, PropertyInfo(alias="test_case_uuid")] + + +class Metrics(TypedDict, total=False): + metric_uuids: List[str] diff --git a/src/gradientai/types/regions/evaluation_test_case_update_response.py b/src/gradientai/types/regions/evaluation_test_case_update_response.py new file mode 100644 index 00000000..6f8e3b04 --- /dev/null +++ b/src/gradientai/types/regions/evaluation_test_case_update_response.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["EvaluationTestCaseUpdateResponse"] + + +class EvaluationTestCaseUpdateResponse(BaseModel): + test_case_uuid: Optional[str] = None + + version: Optional[int] = None + """The new verson of the test case.""" diff --git a/tests/api_resources/regions/evaluation_runs/__init__.py b/tests/api_resources/regions/evaluation_runs/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/regions/evaluation_runs/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
diff --git a/tests/api_resources/regions/evaluation_runs/test_results.py b/tests/api_resources/regions/evaluation_runs/test_results.py new file mode 100644 index 00000000..29deb8b2 --- /dev/null +++ b/tests/api_resources/regions/evaluation_runs/test_results.py @@ -0,0 +1,200 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.regions.evaluation_runs import ResultRetrieveResponse, ResultRetrievePromptResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestResults: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + result = client.regions.evaluation_runs.results.retrieve( + "evaluation_run_uuid", + ) + assert_matches_type(ResultRetrieveResponse, result, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.regions.evaluation_runs.results.with_raw_response.retrieve( + "evaluation_run_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + result = response.parse() + assert_matches_type(ResultRetrieveResponse, result, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.regions.evaluation_runs.results.with_streaming_response.retrieve( + "evaluation_run_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + result = response.parse() + assert_matches_type(ResultRetrieveResponse, result, 
path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): + client.regions.evaluation_runs.results.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_prompt(self, client: GradientAI) -> None: + result = client.regions.evaluation_runs.results.retrieve_prompt( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) + assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve_prompt(self, client: GradientAI) -> None: + response = client.regions.evaluation_runs.results.with_raw_response.retrieve_prompt( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + result = response.parse() + assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve_prompt(self, client: GradientAI) -> None: + with client.regions.evaluation_runs.results.with_streaming_response.retrieve_prompt( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + result = response.parse() + assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve_prompt(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): + 
client.regions.evaluation_runs.results.with_raw_response.retrieve_prompt( + prompt_id=0, + evaluation_run_uuid="", + ) + + +class TestAsyncResults: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + result = await async_client.regions.evaluation_runs.results.retrieve( + "evaluation_run_uuid", + ) + assert_matches_type(ResultRetrieveResponse, result, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_runs.results.with_raw_response.retrieve( + "evaluation_run_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + result = await response.parse() + assert_matches_type(ResultRetrieveResponse, result, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_runs.results.with_streaming_response.retrieve( + "evaluation_run_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + result = await response.parse() + assert_matches_type(ResultRetrieveResponse, result, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): + await async_client.regions.evaluation_runs.results.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def 
test_method_retrieve_prompt(self, async_client: AsyncGradientAI) -> None: + result = await async_client.regions.evaluation_runs.results.retrieve_prompt( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) + assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve_prompt(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_runs.results.with_raw_response.retrieve_prompt( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + result = await response.parse() + assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve_prompt(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_runs.results.with_streaming_response.retrieve_prompt( + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + result = await response.parse() + assert_matches_type(ResultRetrievePromptResponse, result, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve_prompt(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): + await async_client.regions.evaluation_runs.results.with_raw_response.retrieve_prompt( + prompt_id=0, + evaluation_run_uuid="", + ) diff --git a/tests/api_resources/regions/test_evaluation_datasets.py b/tests/api_resources/regions/test_evaluation_datasets.py new file mode 100644 index 00000000..3e3da0fe --- /dev/null +++ 
b/tests/api_resources/regions/test_evaluation_datasets.py @@ -0,0 +1,211 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.regions import ( + EvaluationDatasetCreateResponse, + EvaluationDatasetCreateFileUploadPresignedURLsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestEvaluationDatasets: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + evaluation_dataset = client.regions.evaluation_datasets.create() + assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + evaluation_dataset = client.regions.evaluation_datasets.create( + file_upload_dataset={ + "original_file_name": "original_file_name", + "size_in_bytes": "size_in_bytes", + "stored_object_key": "stored_object_key", + }, + name="name", + ) + assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.regions.evaluation_datasets.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_dataset = response.parse() + assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with 
client.regions.evaluation_datasets.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_dataset = response.parse() + assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_create_file_upload_presigned_urls(self, client: GradientAI) -> None: + evaluation_dataset = client.regions.evaluation_datasets.create_file_upload_presigned_urls() + assert_matches_type( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + def test_method_create_file_upload_presigned_urls_with_all_params(self, client: GradientAI) -> None: + evaluation_dataset = client.regions.evaluation_datasets.create_file_upload_presigned_urls( + files=[ + { + "file_name": "file_name", + "file_size": "file_size", + } + ], + ) + assert_matches_type( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_file_upload_presigned_urls(self, client: GradientAI) -> None: + response = client.regions.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_dataset = response.parse() + assert_matches_type( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_file_upload_presigned_urls(self, client: GradientAI) -> None: + with client.regions.evaluation_datasets.with_streaming_response.create_file_upload_presigned_urls() as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_dataset = response.parse() + assert_matches_type( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] + ) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncEvaluationDatasets: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + evaluation_dataset = await async_client.regions.evaluation_datasets.create() + assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + evaluation_dataset = await async_client.regions.evaluation_datasets.create( + file_upload_dataset={ + "original_file_name": "original_file_name", + "size_in_bytes": "size_in_bytes", + "stored_object_key": "stored_object_key", + }, + name="name", + ) + assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_datasets.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_dataset = await response.parse() + assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_datasets.with_streaming_response.create() as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_dataset = await response.parse() + assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_create_file_upload_presigned_urls(self, async_client: AsyncGradientAI) -> None: + evaluation_dataset = await async_client.regions.evaluation_datasets.create_file_upload_presigned_urls() + assert_matches_type( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create_file_upload_presigned_urls_with_all_params( + self, async_client: AsyncGradientAI + ) -> None: + evaluation_dataset = await async_client.regions.evaluation_datasets.create_file_upload_presigned_urls( + files=[ + { + "file_name": "file_name", + "file_size": "file_size", + } + ], + ) + assert_matches_type( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_file_upload_presigned_urls(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_dataset = await response.parse() + assert_matches_type( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_file_upload_presigned_urls(self, async_client: AsyncGradientAI) -> None: + async with ( + async_client.regions.evaluation_datasets.with_streaming_response.create_file_upload_presigned_urls() + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_dataset = await response.parse() + assert_matches_type( + EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"] + ) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/regions/test_evaluation_runs.py b/tests/api_resources/regions/test_evaluation_runs.py new file mode 100644 index 00000000..b2d3c634 --- /dev/null +++ b/tests/api_resources/regions/test_evaluation_runs.py @@ -0,0 +1,187 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.regions import ( + EvaluationRunCreateResponse, + EvaluationRunRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestEvaluationRuns: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + evaluation_run = client.regions.evaluation_runs.create() + assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + evaluation_run = client.regions.evaluation_runs.create( + agent_uuid="agent_uuid", + run_name="run_name", + test_case_uuid="test_case_uuid", + ) + assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.regions.evaluation_runs.with_raw_response.create() + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_run = response.parse() + assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.regions.evaluation_runs.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_run = response.parse() + assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + evaluation_run = client.regions.evaluation_runs.retrieve( + "evaluation_run_uuid", + ) + assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.regions.evaluation_runs.with_raw_response.retrieve( + "evaluation_run_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_run = response.parse() + assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.regions.evaluation_runs.with_streaming_response.retrieve( + "evaluation_run_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_run = response.parse() + assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> 
None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): + client.regions.evaluation_runs.with_raw_response.retrieve( + "", + ) + + +class TestAsyncEvaluationRuns: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + evaluation_run = await async_client.regions.evaluation_runs.create() + assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + evaluation_run = await async_client.regions.evaluation_runs.create( + agent_uuid="agent_uuid", + run_name="run_name", + test_case_uuid="test_case_uuid", + ) + assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_runs.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_run = await response.parse() + assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_runs.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_run = await response.parse() + assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + 
@pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + evaluation_run = await async_client.regions.evaluation_runs.retrieve( + "evaluation_run_uuid", + ) + assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_runs.with_raw_response.retrieve( + "evaluation_run_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_run = await response.parse() + assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_runs.with_streaming_response.retrieve( + "evaluation_run_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_run = await response.parse() + assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): + await async_client.regions.evaluation_runs.with_raw_response.retrieve( + "", + ) diff --git a/tests/api_resources/regions/test_evaluation_test_cases.py b/tests/api_resources/regions/test_evaluation_test_cases.py new file mode 100644 index 00000000..a01ace90 --- /dev/null +++ b/tests/api_resources/regions/test_evaluation_test_cases.py @@ -0,0 +1,486 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.regions import ( + EvaluationTestCaseListResponse, + EvaluationTestCaseCreateResponse, + EvaluationTestCaseUpdateResponse, + EvaluationTestCaseRetrieveResponse, + EvaluationTestCaseListEvaluationRunsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestEvaluationTestCases: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + evaluation_test_case = client.regions.evaluation_test_cases.create() + assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + evaluation_test_case = client.regions.evaluation_test_cases.create( + dataset_uuid="dataset_uuid", + description="description", + metrics=["string"], + name="name", + star_metric={ + "metric_uuid": "metric_uuid", + "name": "name", + "success_threshold_pct": 0, + }, + workspace_uuid="workspace_uuid", + ) + assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.regions.evaluation_test_cases.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: 
GradientAI) -> None: + with client.regions.evaluation_test_cases.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + evaluation_test_case = client.regions.evaluation_test_cases.retrieve( + "test_case_uuid", + ) + assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.regions.evaluation_test_cases.with_raw_response.retrieve( + "test_case_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.regions.evaluation_test_cases.with_streaming_response.retrieve( + "test_case_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"): + client.regions.evaluation_test_cases.with_raw_response.retrieve( + "", + ) + + 
@pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + evaluation_test_case = client.regions.evaluation_test_cases.update( + path_test_case_uuid="test_case_uuid", + ) + assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + evaluation_test_case = client.regions.evaluation_test_cases.update( + path_test_case_uuid="test_case_uuid", + dataset_uuid="dataset_uuid", + description="description", + metrics={"metric_uuids": ["string"]}, + name="name", + star_metric={ + "metric_uuid": "metric_uuid", + "name": "name", + "success_threshold_pct": 0, + }, + body_test_case_uuid="test_case_uuid", + ) + assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.regions.evaluation_test_cases.with_raw_response.update( + path_test_case_uuid="test_case_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.regions.evaluation_test_cases.with_streaming_response.update( + path_test_case_uuid="test_case_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: 
GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_test_case_uuid` but received ''"): + client.regions.evaluation_test_cases.with_raw_response.update( + path_test_case_uuid="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + evaluation_test_case = client.regions.evaluation_test_cases.list() + assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.regions.evaluation_test_cases.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.regions.evaluation_test_cases.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list_evaluation_runs(self, client: GradientAI) -> None: + evaluation_test_case = client.regions.evaluation_test_cases.list_evaluation_runs( + evaluation_test_case_uuid="evaluation_test_case_uuid", + ) + assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_evaluation_runs_with_all_params(self, client: GradientAI) -> None: + evaluation_test_case = client.regions.evaluation_test_cases.list_evaluation_runs( + 
evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_version=0, + ) + assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_evaluation_runs(self, client: GradientAI) -> None: + response = client.regions.evaluation_test_cases.with_raw_response.list_evaluation_runs( + evaluation_test_case_uuid="evaluation_test_case_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_evaluation_runs(self, client: GradientAI) -> None: + with client.regions.evaluation_test_cases.with_streaming_response.list_evaluation_runs( + evaluation_test_case_uuid="evaluation_test_case_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = response.parse() + assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_evaluation_runs(self, client: GradientAI) -> None: + with pytest.raises( + ValueError, match=r"Expected a non-empty value for `evaluation_test_case_uuid` but received ''" + ): + client.regions.evaluation_test_cases.with_raw_response.list_evaluation_runs( + evaluation_test_case_uuid="", + ) + + +class TestAsyncEvaluationTestCases: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> 
None: + evaluation_test_case = await async_client.regions.evaluation_test_cases.create() + assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + evaluation_test_case = await async_client.regions.evaluation_test_cases.create( + dataset_uuid="dataset_uuid", + description="description", + metrics=["string"], + name="name", + star_metric={ + "metric_uuid": "metric_uuid", + "name": "name", + "success_threshold_pct": 0, + }, + workspace_uuid="workspace_uuid", + ) + assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_test_cases.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_test_cases.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + evaluation_test_case = await async_client.regions.evaluation_test_cases.retrieve( + "test_case_uuid", + ) + 
assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_test_cases.with_raw_response.retrieve( + "test_case_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_test_cases.with_streaming_response.retrieve( + "test_case_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"): + await async_client.regions.evaluation_test_cases.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + evaluation_test_case = await async_client.regions.evaluation_test_cases.update( + path_test_case_uuid="test_case_uuid", + ) + assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + evaluation_test_case = await 
async_client.regions.evaluation_test_cases.update( + path_test_case_uuid="test_case_uuid", + dataset_uuid="dataset_uuid", + description="description", + metrics={"metric_uuids": ["string"]}, + name="name", + star_metric={ + "metric_uuid": "metric_uuid", + "name": "name", + "success_threshold_pct": 0, + }, + body_test_case_uuid="test_case_uuid", + ) + assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_test_cases.with_raw_response.update( + path_test_case_uuid="test_case_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_test_cases.with_streaming_response.update( + path_test_case_uuid="test_case_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_test_case_uuid` but received ''"): + await async_client.regions.evaluation_test_cases.with_raw_response.update( + path_test_case_uuid="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + 
evaluation_test_case = await async_client.regions.evaluation_test_cases.list() + assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_test_cases.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_test_cases.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: + evaluation_test_case = await async_client.regions.evaluation_test_cases.list_evaluation_runs( + evaluation_test_case_uuid="evaluation_test_case_uuid", + ) + assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_evaluation_runs_with_all_params(self, async_client: AsyncGradientAI) -> None: + evaluation_test_case = await async_client.regions.evaluation_test_cases.list_evaluation_runs( + evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_version=0, + ) + assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, 
path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.evaluation_test_cases.with_raw_response.list_evaluation_runs( + evaluation_test_case_uuid="evaluation_test_case_uuid", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.evaluation_test_cases.with_streaming_response.list_evaluation_runs( + evaluation_test_case_uuid="evaluation_test_case_uuid", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_test_case = await response.parse() + assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: + with pytest.raises( + ValueError, match=r"Expected a non-empty value for `evaluation_test_case_uuid` but received ''" + ): + await async_client.regions.evaluation_test_cases.with_raw_response.list_evaluation_runs( + evaluation_test_case_uuid="", + ) diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py index 8e25617f..9cb24b0a 100644 --- a/tests/api_resources/test_regions.py +++ b/tests/api_resources/test_regions.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import RegionListResponse +from gradientai.types 
import RegionListResponse, RegionListEvaluationMetricsResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -54,6 +54,34 @@ def test_streaming_response_list(self, client: GradientAI) -> None: assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + def test_method_list_evaluation_metrics(self, client: GradientAI) -> None: + region = client.regions.list_evaluation_metrics() + assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_evaluation_metrics(self, client: GradientAI) -> None: + response = client.regions.with_raw_response.list_evaluation_metrics() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + region = response.parse() + assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_evaluation_metrics(self, client: GradientAI) -> None: + with client.regions.with_streaming_response.list_evaluation_metrics() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + region = response.parse() + assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) + + assert cast(Any, response.is_closed) is True + class TestAsyncRegions: parametrize = pytest.mark.parametrize( @@ -96,3 +124,31 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N assert_matches_type(RegionListResponse, region, path=["response"]) assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list_evaluation_metrics(self, async_client: AsyncGradientAI) -> None: + region = await async_client.regions.list_evaluation_metrics() + assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + async def test_raw_response_list_evaluation_metrics(self, async_client: AsyncGradientAI) -> None: + response = await async_client.regions.with_raw_response.list_evaluation_metrics() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + region = await response.parse() + assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_evaluation_metrics(self, async_client: AsyncGradientAI) -> None: + async with async_client.regions.with_streaming_response.list_evaluation_metrics() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + region = await response.parse() + assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"]) + + assert cast(Any, response.is_closed) is True From 584f9f1304b3612eb25f1438041d287592463438 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 01:19:45 +0000 Subject: [PATCH 38/41] feat(api): update via SDK Studio --- .stats.yml | 2 +- api.md | 46 +++++----- src/gradientai/_client.py | 39 +-------- src/gradientai/resources/__init__.py | 14 --- .../resources/knowledge_bases/__init__.py | 14 +++ .../{ => knowledge_bases}/indexing_jobs.py | 28 +++--- .../knowledge_bases/knowledge_bases.py | 32 +++++++ src/gradientai/types/__init__.py | 11 --- src/gradientai/types/api_knowledge_base.py | 2 +- .../types/knowledge_bases/__init__.py | 11 +++ .../{ => knowledge_bases}/api_indexing_job.py | 2 +- .../api_knowledge_base_data_source.py | 2 +- .../indexing_job_create_params.py | 0 .../indexing_job_create_response.py | 2 +- .../indexing_job_list_params.py | 0 .../indexing_job_list_response.py | 6 +- ...xing_job_retrieve_data_sources_response.py | 2 +- .../indexing_job_retrieve_response.py | 2 
+- .../indexing_job_update_cancel_params.py | 2 +- .../indexing_job_update_cancel_response.py | 2 +- .../test_indexing_jobs.py | 86 +++++++++---------- 21 files changed, 152 insertions(+), 153 deletions(-) rename src/gradientai/resources/{ => knowledge_bases}/indexing_jobs.py (95%) rename src/gradientai/types/{ => knowledge_bases}/api_indexing_job.py (96%) rename src/gradientai/types/{ => knowledge_bases}/indexing_job_create_params.py (100%) rename src/gradientai/types/{ => knowledge_bases}/indexing_job_create_response.py (89%) rename src/gradientai/types/{ => knowledge_bases}/indexing_job_list_params.py (100%) rename src/gradientai/types/{ => knowledge_bases}/indexing_job_list_response.py (77%) rename src/gradientai/types/{ => knowledge_bases}/indexing_job_retrieve_data_sources_response.py (97%) rename src/gradientai/types/{ => knowledge_bases}/indexing_job_retrieve_response.py (89%) rename src/gradientai/types/{ => knowledge_bases}/indexing_job_update_cancel_params.py (91%) rename src/gradientai/types/{ => knowledge_bases}/indexing_job_update_cancel_response.py (90%) rename tests/api_resources/{ => knowledge_bases}/test_indexing_jobs.py (80%) diff --git a/.stats.yml b/.stats.yml index f0863f5f..b756ab92 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 70 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 211ece2994c6ac52f84f78ee56c1097a +config_hash: 4d0e9e07e7ac5a666632cffb655d028c diff --git a/api.md b/api.md index 970f6951..d14ceec2 100644 --- a/api.md +++ b/api.md @@ -260,29 +260,6 @@ Methods: - client.regions.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse - client.regions.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse -# IndexingJobs - -Types: 
- -```python -from gradientai.types import ( - APIIndexingJob, - IndexingJobCreateResponse, - IndexingJobRetrieveResponse, - IndexingJobListResponse, - IndexingJobRetrieveDataSourcesResponse, - IndexingJobUpdateCancelResponse, -) -``` - -Methods: - -- client.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse -- client.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse -- client.indexing_jobs.list(\*\*params) -> IndexingJobListResponse -- client.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse -- client.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse - # KnowledgeBases Types: @@ -329,6 +306,29 @@ Methods: - client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse - client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse +## IndexingJobs + +Types: + +```python +from gradientai.types.knowledge_bases import ( + APIIndexingJob, + IndexingJobCreateResponse, + IndexingJobRetrieveResponse, + IndexingJobListResponse, + IndexingJobRetrieveDataSourcesResponse, + IndexingJobUpdateCancelResponse, +) +``` + +Methods: + +- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse +- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse +- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse +- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse +- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse + # Chat ## Completions diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 71db35bc..00025498 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -31,11 +31,10 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, 
regions, inference, providers, indexing_jobs, knowledge_bases + from .resources import chat, agents, models, regions, inference, providers, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource - from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource from .resources.regions.regions import RegionsResource, AsyncRegionsResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource from .resources.providers.providers import ProvidersResource, AsyncProvidersResource @@ -127,12 +126,6 @@ def regions(self) -> RegionsResource: return RegionsResource(self) - @cached_property - def indexing_jobs(self) -> IndexingJobsResource: - from .resources.indexing_jobs import IndexingJobsResource - - return IndexingJobsResource(self) - @cached_property def knowledge_bases(self) -> KnowledgeBasesResource: from .resources.knowledge_bases import KnowledgeBasesResource @@ -346,12 +339,6 @@ def regions(self) -> AsyncRegionsResource: return AsyncRegionsResource(self) - @cached_property - def indexing_jobs(self) -> AsyncIndexingJobsResource: - from .resources.indexing_jobs import AsyncIndexingJobsResource - - return AsyncIndexingJobsResource(self) - @cached_property def knowledge_bases(self) -> AsyncKnowledgeBasesResource: from .resources.knowledge_bases import AsyncKnowledgeBasesResource @@ -515,12 +502,6 @@ def regions(self) -> regions.RegionsResourceWithRawResponse: return RegionsResourceWithRawResponse(self._client.regions) - @cached_property - def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithRawResponse: - from .resources.indexing_jobs import IndexingJobsResourceWithRawResponse - - return IndexingJobsResourceWithRawResponse(self._client.indexing_jobs) - @cached_property def knowledge_bases(self) -> 
knowledge_bases.KnowledgeBasesResourceWithRawResponse: from .resources.knowledge_bases import KnowledgeBasesResourceWithRawResponse @@ -570,12 +551,6 @@ def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: return AsyncRegionsResourceWithRawResponse(self._client.regions) - @cached_property - def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithRawResponse: - from .resources.indexing_jobs import AsyncIndexingJobsResourceWithRawResponse - - return AsyncIndexingJobsResourceWithRawResponse(self._client.indexing_jobs) - @cached_property def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawResponse: from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithRawResponse @@ -625,12 +600,6 @@ def regions(self) -> regions.RegionsResourceWithStreamingResponse: return RegionsResourceWithStreamingResponse(self._client.regions) - @cached_property - def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithStreamingResponse: - from .resources.indexing_jobs import IndexingJobsResourceWithStreamingResponse - - return IndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) - @cached_property def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreamingResponse: from .resources.knowledge_bases import KnowledgeBasesResourceWithStreamingResponse @@ -680,12 +649,6 @@ def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: return AsyncRegionsResourceWithStreamingResponse(self._client.regions) - @cached_property - def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithStreamingResponse: - from .resources.indexing_jobs import AsyncIndexingJobsResourceWithStreamingResponse - - return AsyncIndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) - @cached_property def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStreamingResponse: from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithStreamingResponse 
diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 1763a13e..6ad0aa32 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -48,14 +48,6 @@ ProvidersResourceWithStreamingResponse, AsyncProvidersResourceWithStreamingResponse, ) -from .indexing_jobs import ( - IndexingJobsResource, - AsyncIndexingJobsResource, - IndexingJobsResourceWithRawResponse, - AsyncIndexingJobsResourceWithRawResponse, - IndexingJobsResourceWithStreamingResponse, - AsyncIndexingJobsResourceWithStreamingResponse, -) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -84,12 +76,6 @@ "AsyncRegionsResourceWithRawResponse", "RegionsResourceWithStreamingResponse", "AsyncRegionsResourceWithStreamingResponse", - "IndexingJobsResource", - "AsyncIndexingJobsResource", - "IndexingJobsResourceWithRawResponse", - "AsyncIndexingJobsResourceWithRawResponse", - "IndexingJobsResourceWithStreamingResponse", - "AsyncIndexingJobsResourceWithStreamingResponse", "KnowledgeBasesResource", "AsyncKnowledgeBasesResource", "KnowledgeBasesResourceWithRawResponse", diff --git a/src/gradientai/resources/knowledge_bases/__init__.py b/src/gradientai/resources/knowledge_bases/__init__.py index 03d143e2..80d04328 100644 --- a/src/gradientai/resources/knowledge_bases/__init__.py +++ b/src/gradientai/resources/knowledge_bases/__init__.py @@ -8,6 +8,14 @@ DataSourcesResourceWithStreamingResponse, AsyncDataSourcesResourceWithStreamingResponse, ) +from .indexing_jobs import ( + IndexingJobsResource, + AsyncIndexingJobsResource, + IndexingJobsResourceWithRawResponse, + AsyncIndexingJobsResourceWithRawResponse, + IndexingJobsResourceWithStreamingResponse, + AsyncIndexingJobsResourceWithStreamingResponse, +) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -24,6 +32,12 @@ "AsyncDataSourcesResourceWithRawResponse", "DataSourcesResourceWithStreamingResponse", 
"AsyncDataSourcesResourceWithStreamingResponse", + "IndexingJobsResource", + "AsyncIndexingJobsResource", + "IndexingJobsResourceWithRawResponse", + "AsyncIndexingJobsResourceWithRawResponse", + "IndexingJobsResourceWithStreamingResponse", + "AsyncIndexingJobsResourceWithStreamingResponse", "KnowledgeBasesResource", "AsyncKnowledgeBasesResource", "KnowledgeBasesResourceWithRawResponse", diff --git a/src/gradientai/resources/indexing_jobs.py b/src/gradientai/resources/knowledge_bases/indexing_jobs.py similarity index 95% rename from src/gradientai/resources/indexing_jobs.py rename to src/gradientai/resources/knowledge_bases/indexing_jobs.py index 71c59023..39151e41 100644 --- a/src/gradientai/resources/indexing_jobs.py +++ b/src/gradientai/resources/knowledge_bases/indexing_jobs.py @@ -6,23 +6,27 @@ import httpx -from ..types import indexing_job_list_params, indexing_job_create_params, indexing_job_update_cancel_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from .._base_client import make_request_options -from ..types.indexing_job_list_response import IndexingJobListResponse -from ..types.indexing_job_create_response import IndexingJobCreateResponse -from ..types.indexing_job_retrieve_response import IndexingJobRetrieveResponse -from ..types.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse -from ..types.indexing_job_retrieve_data_sources_response import 
IndexingJobRetrieveDataSourcesResponse +from ..._base_client import make_request_options +from ...types.knowledge_bases import ( + indexing_job_list_params, + indexing_job_create_params, + indexing_job_update_cancel_params, +) +from ...types.knowledge_bases.indexing_job_list_response import IndexingJobListResponse +from ...types.knowledge_bases.indexing_job_create_response import IndexingJobCreateResponse +from ...types.knowledge_bases.indexing_job_retrieve_response import IndexingJobRetrieveResponse +from ...types.knowledge_bases.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse +from ...types.knowledge_bases.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse __all__ = ["IndexingJobsResource", "AsyncIndexingJobsResource"] diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py index 2cab4f7b..28acdd7f 100644 --- a/src/gradientai/resources/knowledge_bases/knowledge_bases.py +++ b/src/gradientai/resources/knowledge_bases/knowledge_bases.py @@ -25,6 +25,14 @@ DataSourcesResourceWithStreamingResponse, AsyncDataSourcesResourceWithStreamingResponse, ) +from .indexing_jobs import ( + IndexingJobsResource, + AsyncIndexingJobsResource, + IndexingJobsResourceWithRawResponse, + AsyncIndexingJobsResourceWithRawResponse, + IndexingJobsResourceWithStreamingResponse, + AsyncIndexingJobsResourceWithStreamingResponse, +) from ..._base_client import make_request_options from ...types.knowledge_base_list_response import KnowledgeBaseListResponse from ...types.knowledge_base_create_response import KnowledgeBaseCreateResponse @@ -40,6 +48,10 @@ class KnowledgeBasesResource(SyncAPIResource): def data_sources(self) -> DataSourcesResource: return DataSourcesResource(self._client) + @cached_property + def indexing_jobs(self) -> IndexingJobsResource: + return IndexingJobsResource(self._client) + @cached_property def with_raw_response(self) -> 
KnowledgeBasesResourceWithRawResponse: """ @@ -316,6 +328,10 @@ class AsyncKnowledgeBasesResource(AsyncAPIResource): def data_sources(self) -> AsyncDataSourcesResource: return AsyncDataSourcesResource(self._client) + @cached_property + def indexing_jobs(self) -> AsyncIndexingJobsResource: + return AsyncIndexingJobsResource(self._client) + @cached_property def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: """ @@ -611,6 +627,10 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: def data_sources(self) -> DataSourcesResourceWithRawResponse: return DataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources) + @cached_property + def indexing_jobs(self) -> IndexingJobsResourceWithRawResponse: + return IndexingJobsResourceWithRawResponse(self._knowledge_bases.indexing_jobs) + class AsyncKnowledgeBasesResourceWithRawResponse: def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: @@ -636,6 +656,10 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: def data_sources(self) -> AsyncDataSourcesResourceWithRawResponse: return AsyncDataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources) + @cached_property + def indexing_jobs(self) -> AsyncIndexingJobsResourceWithRawResponse: + return AsyncIndexingJobsResourceWithRawResponse(self._knowledge_bases.indexing_jobs) + class KnowledgeBasesResourceWithStreamingResponse: def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: @@ -661,6 +685,10 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: def data_sources(self) -> DataSourcesResourceWithStreamingResponse: return DataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources) + @cached_property + def indexing_jobs(self) -> IndexingJobsResourceWithStreamingResponse: + return IndexingJobsResourceWithStreamingResponse(self._knowledge_bases.indexing_jobs) + class AsyncKnowledgeBasesResourceWithStreamingResponse: def 
__init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: @@ -685,3 +713,7 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: @cached_property def data_sources(self) -> AsyncDataSourcesResourceWithStreamingResponse: return AsyncDataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources) + + @cached_property + def indexing_jobs(self) -> AsyncIndexingJobsResourceWithStreamingResponse: + return AsyncIndexingJobsResourceWithStreamingResponse(self._knowledge_bases.indexing_jobs) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index d09aaa2a..89a5ec4a 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -7,7 +7,6 @@ from .api_agreement import APIAgreement as APIAgreement from .api_workspace import APIWorkspace as APIWorkspace from .api_agent_model import APIAgentModel as APIAgentModel -from .api_indexing_job import APIIndexingJob as APIIndexingJob from .agent_list_params import AgentListParams as AgentListParams from .api_model_version import APIModelVersion as APIModelVersion from .model_list_params import ModelListParams as ModelListParams @@ -26,28 +25,18 @@ from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo -from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo -from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams -from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse from 
.knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse -from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams -from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse -from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams -from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse from .region_list_evaluation_metrics_response import ( RegionListEvaluationMetricsResponse as RegionListEvaluationMetricsResponse, ) -from .indexing_job_retrieve_data_sources_response import ( - IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, -) diff --git a/src/gradientai/types/api_knowledge_base.py b/src/gradientai/types/api_knowledge_base.py index 5b4b6e2c..2b0676f0 100644 --- a/src/gradientai/types/api_knowledge_base.py +++ b/src/gradientai/types/api_knowledge_base.py @@ -4,7 +4,7 @@ from datetime import datetime from .._models import BaseModel -from .api_indexing_job import APIIndexingJob +from .knowledge_bases.api_indexing_job 
import APIIndexingJob __all__ = ["APIKnowledgeBase"] diff --git a/src/gradientai/types/knowledge_bases/__init__.py b/src/gradientai/types/knowledge_bases/__init__.py index 859c3618..9fc915e5 100644 --- a/src/gradientai/types/knowledge_bases/__init__.py +++ b/src/gradientai/types/knowledge_bases/__init__.py @@ -2,16 +2,27 @@ from __future__ import annotations +from .api_indexing_job import APIIndexingJob as APIIndexingJob from .aws_data_source_param import AwsDataSourceParam as AwsDataSourceParam from .api_spaces_data_source import APISpacesDataSource as APISpacesDataSource from .data_source_list_params import DataSourceListParams as DataSourceListParams +from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .data_source_create_params import DataSourceCreateParams as DataSourceCreateParams from .data_source_list_response import DataSourceListResponse as DataSourceListResponse +from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams +from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse from .api_file_upload_data_source import APIFileUploadDataSource as APIFileUploadDataSource from .api_web_crawler_data_source import APIWebCrawlerDataSource as APIWebCrawlerDataSource from .data_source_create_response import DataSourceCreateResponse as DataSourceCreateResponse from .data_source_delete_response import DataSourceDeleteResponse as DataSourceDeleteResponse from .api_spaces_data_source_param import APISpacesDataSourceParam as APISpacesDataSourceParam +from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource as APIKnowledgeBaseDataSource +from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse from .api_file_upload_data_source_param import APIFileUploadDataSourceParam as APIFileUploadDataSourceParam from 
.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam as APIWebCrawlerDataSourceParam +from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams +from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse +from .indexing_job_retrieve_data_sources_response import ( + IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, +) diff --git a/src/gradientai/types/api_indexing_job.py b/src/gradientai/types/knowledge_bases/api_indexing_job.py similarity index 96% rename from src/gradientai/types/api_indexing_job.py rename to src/gradientai/types/knowledge_bases/api_indexing_job.py index f24aac94..2809141c 100644 --- a/src/gradientai/types/api_indexing_job.py +++ b/src/gradientai/types/knowledge_bases/api_indexing_job.py @@ -4,7 +4,7 @@ from datetime import datetime from typing_extensions import Literal -from .._models import BaseModel +from ..._models import BaseModel __all__ = ["APIIndexingJob"] diff --git a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py index 57080aaa..ca24d6f0 100644 --- a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py +++ b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py @@ -4,7 +4,7 @@ from datetime import datetime from ..._models import BaseModel -from ..api_indexing_job import APIIndexingJob +from .api_indexing_job import APIIndexingJob from .api_spaces_data_source import APISpacesDataSource from .api_file_upload_data_source import APIFileUploadDataSource from .api_web_crawler_data_source import APIWebCrawlerDataSource diff --git a/src/gradientai/types/indexing_job_create_params.py b/src/gradientai/types/knowledge_bases/indexing_job_create_params.py similarity index 100% rename from src/gradientai/types/indexing_job_create_params.py rename to 
src/gradientai/types/knowledge_bases/indexing_job_create_params.py diff --git a/src/gradientai/types/indexing_job_create_response.py b/src/gradientai/types/knowledge_bases/indexing_job_create_response.py similarity index 89% rename from src/gradientai/types/indexing_job_create_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_create_response.py index 839bc83b..835ec60d 100644 --- a/src/gradientai/types/indexing_job_create_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_create_response.py @@ -2,7 +2,7 @@ from typing import Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_indexing_job import APIIndexingJob __all__ = ["IndexingJobCreateResponse"] diff --git a/src/gradientai/types/indexing_job_list_params.py b/src/gradientai/types/knowledge_bases/indexing_job_list_params.py similarity index 100% rename from src/gradientai/types/indexing_job_list_params.py rename to src/gradientai/types/knowledge_bases/indexing_job_list_params.py diff --git a/src/gradientai/types/indexing_job_list_response.py b/src/gradientai/types/knowledge_bases/indexing_job_list_response.py similarity index 77% rename from src/gradientai/types/indexing_job_list_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_list_response.py index 1379cc55..4784c1a1 100644 --- a/src/gradientai/types/indexing_job_list_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_list_response.py @@ -2,10 +2,10 @@ from typing import List, Optional -from .._models import BaseModel -from .agents.api_meta import APIMeta -from .agents.api_links import APILinks +from ..._models import BaseModel +from ..agents.api_meta import APIMeta from .api_indexing_job import APIIndexingJob +from ..agents.api_links import APILinks __all__ = ["IndexingJobListResponse"] diff --git a/src/gradientai/types/indexing_job_retrieve_data_sources_response.py 
b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py similarity index 97% rename from src/gradientai/types/indexing_job_retrieve_data_sources_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py index b178b984..a9d0c2c0 100644 --- a/src/gradientai/types/indexing_job_retrieve_data_sources_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py @@ -4,7 +4,7 @@ from datetime import datetime from typing_extensions import Literal -from .._models import BaseModel +from ..._models import BaseModel __all__ = ["IndexingJobRetrieveDataSourcesResponse", "IndexedDataSource"] diff --git a/src/gradientai/types/indexing_job_retrieve_response.py b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py similarity index 89% rename from src/gradientai/types/indexing_job_retrieve_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py index 95f33d7a..6034bdf1 100644 --- a/src/gradientai/types/indexing_job_retrieve_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py @@ -2,7 +2,7 @@ from typing import Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_indexing_job import APIIndexingJob __all__ = ["IndexingJobRetrieveResponse"] diff --git a/src/gradientai/types/indexing_job_update_cancel_params.py b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py similarity index 91% rename from src/gradientai/types/indexing_job_update_cancel_params.py rename to src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py index 4c2848b0..9359a42a 100644 --- a/src/gradientai/types/indexing_job_update_cancel_params.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py @@ -4,7 +4,7 @@ from typing_extensions import Annotated, TypedDict -from .._utils import PropertyInfo +from ..._utils 
import PropertyInfo __all__ = ["IndexingJobUpdateCancelParams"] diff --git a/src/gradientai/types/indexing_job_update_cancel_response.py b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py similarity index 90% rename from src/gradientai/types/indexing_job_update_cancel_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py index d50e1865..ae4b394f 100644 --- a/src/gradientai/types/indexing_job_update_cancel_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py @@ -2,7 +2,7 @@ from typing import Optional -from .._models import BaseModel +from ..._models import BaseModel from .api_indexing_job import APIIndexingJob __all__ = ["IndexingJobUpdateCancelResponse"] diff --git a/tests/api_resources/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py similarity index 80% rename from tests/api_resources/test_indexing_jobs.py rename to tests/api_resources/knowledge_bases/test_indexing_jobs.py index 6a50d9b5..8bf1829f 100644 --- a/tests/api_resources/test_indexing_jobs.py +++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ( +from gradientai.types.knowledge_bases import ( IndexingJobListResponse, IndexingJobCreateResponse, IndexingJobRetrieveResponse, @@ -26,13 +26,13 @@ class TestIndexingJobs: @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.create() + indexing_job = client.knowledge_bases.indexing_jobs.create() assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.create( + indexing_job = client.knowledge_bases.indexing_jobs.create( 
data_source_uuids=["string"], knowledge_base_uuid="knowledge_base_uuid", ) @@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.indexing_jobs.with_raw_response.create() + response = client.knowledge_bases.indexing_jobs.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -51,7 +51,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.indexing_jobs.with_streaming_response.create() as response: + with client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -63,7 +63,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_retrieve(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.retrieve( + indexing_job = client.knowledge_bases.indexing_jobs.retrieve( "uuid", ) assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) @@ -71,7 +71,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.indexing_jobs.with_raw_response.retrieve( + response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( "uuid", ) @@ -83,7 +83,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.indexing_jobs.with_streaming_response.retrieve( + with 
client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed @@ -98,20 +98,20 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @parametrize def test_path_params_retrieve(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.indexing_jobs.with_raw_response.retrieve( + client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.list() + indexing_job = client.knowledge_bases.indexing_jobs.list() assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.list( + indexing_job = client.knowledge_bases.indexing_jobs.list( page=0, per_page=0, ) @@ -120,7 +120,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.indexing_jobs.with_raw_response.list() + response = client.knowledge_bases.indexing_jobs.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -130,7 +130,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: - with client.indexing_jobs.with_streaming_response.list() as response: + with client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -142,7 +142,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() 
@parametrize def test_method_retrieve_data_sources(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.retrieve_data_sources( + indexing_job = client.knowledge_bases.indexing_jobs.retrieve_data_sources( "indexing_job_uuid", ) assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) @@ -150,7 +150,7 @@ def test_method_retrieve_data_sources(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_retrieve_data_sources(self, client: GradientAI) -> None: - response = client.indexing_jobs.with_raw_response.retrieve_data_sources( + response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( "indexing_job_uuid", ) @@ -162,7 +162,7 @@ def test_raw_response_retrieve_data_sources(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_retrieve_data_sources(self, client: GradientAI) -> None: - with client.indexing_jobs.with_streaming_response.retrieve_data_sources( + with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources( "indexing_job_uuid", ) as response: assert not response.is_closed @@ -177,14 +177,14 @@ def test_streaming_response_retrieve_data_sources(self, client: GradientAI) -> N @parametrize def test_path_params_retrieve_data_sources(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): - client.indexing_jobs.with_raw_response.retrieve_data_sources( + client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( "", ) @pytest.mark.skip() @parametrize def test_method_update_cancel(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.update_cancel( + indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( path_uuid="uuid", ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -192,7 +192,7 @@ def 
test_method_update_cancel(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None: - indexing_job = client.indexing_jobs.update_cancel( + indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( path_uuid="uuid", body_uuid="uuid", ) @@ -201,7 +201,7 @@ def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update_cancel(self, client: GradientAI) -> None: - response = client.indexing_jobs.with_raw_response.update_cancel( + response = client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( path_uuid="uuid", ) @@ -213,7 +213,7 @@ def test_raw_response_update_cancel(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update_cancel(self, client: GradientAI) -> None: - with client.indexing_jobs.with_streaming_response.update_cancel( + with client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel( path_uuid="uuid", ) as response: assert not response.is_closed @@ -228,7 +228,7 @@ def test_streaming_response_update_cancel(self, client: GradientAI) -> None: @parametrize def test_path_params_update_cancel(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.indexing_jobs.with_raw_response.update_cancel( + client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( path_uuid="", ) @@ -241,13 +241,13 @@ class TestAsyncIndexingJobs: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.create() + indexing_job = await async_client.knowledge_bases.indexing_jobs.create() assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize async def 
test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.create( + indexing_job = await async_client.knowledge_bases.indexing_jobs.create( data_source_uuids=["string"], knowledge_base_uuid="knowledge_base_uuid", ) @@ -256,7 +256,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.indexing_jobs.with_raw_response.create() + response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -266,7 +266,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.indexing_jobs.with_streaming_response.create() as response: + async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -278,7 +278,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.retrieve( + indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve( "uuid", ) assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) @@ -286,7 +286,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await 
async_client.indexing_jobs.with_raw_response.retrieve( + response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( "uuid", ) @@ -298,7 +298,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.indexing_jobs.with_streaming_response.retrieve( + async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed @@ -313,20 +313,20 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.indexing_jobs.with_raw_response.retrieve( + await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.list() + indexing_job = await async_client.knowledge_bases.indexing_jobs.list() assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.list( + indexing_job = await async_client.knowledge_bases.indexing_jobs.list( page=0, per_page=0, ) @@ -335,7 +335,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.indexing_jobs.with_raw_response.list() + response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.list() assert 
response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -345,7 +345,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.indexing_jobs.with_streaming_response.list() as response: + async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -357,7 +357,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize async def test_method_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.retrieve_data_sources( + indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve_data_sources( "indexing_job_uuid", ) assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) @@ -365,7 +365,7 @@ async def test_method_retrieve_data_sources(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: - response = await async_client.indexing_jobs.with_raw_response.retrieve_data_sources( + response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( "indexing_job_uuid", ) @@ -377,7 +377,7 @@ async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradi @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: - async with async_client.indexing_jobs.with_streaming_response.retrieve_data_sources( + async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources( "indexing_job_uuid", ) 
as response: assert not response.is_closed @@ -392,14 +392,14 @@ async def test_streaming_response_retrieve_data_sources(self, async_client: Asyn @parametrize async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): - await async_client.indexing_jobs.with_raw_response.retrieve_data_sources( + await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( "", ) @pytest.mark.skip() @parametrize async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.update_cancel( + indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel( path_uuid="uuid", ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -407,7 +407,7 @@ async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize async def test_method_update_cancel_with_all_params(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.indexing_jobs.update_cancel( + indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel( path_uuid="uuid", body_uuid="uuid", ) @@ -416,7 +416,7 @@ async def test_method_update_cancel_with_all_params(self, async_client: AsyncGra @pytest.mark.skip() @parametrize async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) -> None: - response = await async_client.indexing_jobs.with_raw_response.update_cancel( + response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( path_uuid="uuid", ) @@ -428,7 +428,7 @@ async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize async def test_streaming_response_update_cancel(self, async_client: AsyncGradientAI) -> None: - async with 
async_client.indexing_jobs.with_streaming_response.update_cancel( + async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel( path_uuid="uuid", ) as response: assert not response.is_closed @@ -443,6 +443,6 @@ async def test_streaming_response_update_cancel(self, async_client: AsyncGradien @parametrize async def test_path_params_update_cancel(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.indexing_jobs.with_raw_response.update_cancel( + await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( path_uuid="", ) From b2ccc4a7f36ab9e515d37d7c76507e55fbb4d5d6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 01:33:11 +0000 Subject: [PATCH 39/41] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index b756ab92..af151d02 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 70 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667 -config_hash: 4d0e9e07e7ac5a666632cffb655d028c +config_hash: c53f9cc8c1576a747f2e766faafbbc06 From d1ea884c9be72b3f8804c5ba91bf4f77a3284a6c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 01:50:10 +0000 Subject: [PATCH 40/41] feat(api): update via SDK Studio --- .stats.yml | 2 +- api.md | 46 +++++----- src/gradientai/_client.py | 39 ++++++++- src/gradientai/resources/__init__.py | 14 +++ .../{knowledge_bases => }/indexing_jobs.py | 28 +++--- .../resources/knowledge_bases/__init__.py | 14 --- .../knowledge_bases/knowledge_bases.py | 32 ------- 
src/gradientai/types/__init__.py | 11 +++ .../{knowledge_bases => }/api_indexing_job.py | 2 +- src/gradientai/types/api_knowledge_base.py | 2 +- .../indexing_job_create_params.py | 0 .../indexing_job_create_response.py | 2 +- .../indexing_job_list_params.py | 0 .../indexing_job_list_response.py | 6 +- ...xing_job_retrieve_data_sources_response.py | 2 +- .../indexing_job_retrieve_response.py | 2 +- .../indexing_job_update_cancel_params.py | 2 +- .../indexing_job_update_cancel_response.py | 2 +- .../types/knowledge_bases/__init__.py | 11 --- .../api_knowledge_base_data_source.py | 2 +- .../test_indexing_jobs.py | 86 +++++++++---------- 21 files changed, 153 insertions(+), 152 deletions(-) rename src/gradientai/resources/{knowledge_bases => }/indexing_jobs.py (95%) rename src/gradientai/types/{knowledge_bases => }/api_indexing_job.py (96%) rename src/gradientai/types/{knowledge_bases => }/indexing_job_create_params.py (100%) rename src/gradientai/types/{knowledge_bases => }/indexing_job_create_response.py (89%) rename src/gradientai/types/{knowledge_bases => }/indexing_job_list_params.py (100%) rename src/gradientai/types/{knowledge_bases => }/indexing_job_list_response.py (77%) rename src/gradientai/types/{knowledge_bases => }/indexing_job_retrieve_data_sources_response.py (97%) rename src/gradientai/types/{knowledge_bases => }/indexing_job_retrieve_response.py (89%) rename src/gradientai/types/{knowledge_bases => }/indexing_job_update_cancel_params.py (91%) rename src/gradientai/types/{knowledge_bases => }/indexing_job_update_cancel_response.py (90%) rename tests/api_resources/{knowledge_bases => }/test_indexing_jobs.py (80%) diff --git a/.stats.yml b/.stats.yml index af151d02..f0863f5f 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 70 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml openapi_spec_hash: 
825c1a4816938e9f594b7a8c06692667 -config_hash: c53f9cc8c1576a747f2e766faafbbc06 +config_hash: 211ece2994c6ac52f84f78ee56c1097a diff --git a/api.md b/api.md index d14ceec2..970f6951 100644 --- a/api.md +++ b/api.md @@ -260,6 +260,29 @@ Methods: - client.regions.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse - client.regions.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse +# IndexingJobs + +Types: + +```python +from gradientai.types import ( + APIIndexingJob, + IndexingJobCreateResponse, + IndexingJobRetrieveResponse, + IndexingJobListResponse, + IndexingJobRetrieveDataSourcesResponse, + IndexingJobUpdateCancelResponse, +) +``` + +Methods: + +- client.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse +- client.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse +- client.indexing_jobs.list(\*\*params) -> IndexingJobListResponse +- client.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse +- client.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse + # KnowledgeBases Types: @@ -306,29 +329,6 @@ Methods: - client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse - client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse -## IndexingJobs - -Types: - -```python -from gradientai.types.knowledge_bases import ( - APIIndexingJob, - IndexingJobCreateResponse, - IndexingJobRetrieveResponse, - IndexingJobListResponse, - IndexingJobRetrieveDataSourcesResponse, - IndexingJobUpdateCancelResponse, -) -``` - -Methods: - -- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse -- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse -- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse -- 
client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse -- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse - # Chat ## Completions diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 00025498..71db35bc 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -31,10 +31,11 @@ ) if TYPE_CHECKING: - from .resources import chat, agents, models, regions, inference, providers, knowledge_bases + from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases from .resources.models import ModelsResource, AsyncModelsResource from .resources.chat.chat import ChatResource, AsyncChatResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource + from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource from .resources.regions.regions import RegionsResource, AsyncRegionsResource from .resources.inference.inference import InferenceResource, AsyncInferenceResource from .resources.providers.providers import ProvidersResource, AsyncProvidersResource @@ -126,6 +127,12 @@ def regions(self) -> RegionsResource: return RegionsResource(self) + @cached_property + def indexing_jobs(self) -> IndexingJobsResource: + from .resources.indexing_jobs import IndexingJobsResource + + return IndexingJobsResource(self) + @cached_property def knowledge_bases(self) -> KnowledgeBasesResource: from .resources.knowledge_bases import KnowledgeBasesResource @@ -339,6 +346,12 @@ def regions(self) -> AsyncRegionsResource: return AsyncRegionsResource(self) + @cached_property + def indexing_jobs(self) -> AsyncIndexingJobsResource: + from .resources.indexing_jobs import AsyncIndexingJobsResource + + return AsyncIndexingJobsResource(self) + @cached_property def knowledge_bases(self) -> AsyncKnowledgeBasesResource: from .resources.knowledge_bases import 
AsyncKnowledgeBasesResource @@ -502,6 +515,12 @@ def regions(self) -> regions.RegionsResourceWithRawResponse: return RegionsResourceWithRawResponse(self._client.regions) + @cached_property + def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithRawResponse: + from .resources.indexing_jobs import IndexingJobsResourceWithRawResponse + + return IndexingJobsResourceWithRawResponse(self._client.indexing_jobs) + @cached_property def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawResponse: from .resources.knowledge_bases import KnowledgeBasesResourceWithRawResponse @@ -551,6 +570,12 @@ def regions(self) -> regions.AsyncRegionsResourceWithRawResponse: return AsyncRegionsResourceWithRawResponse(self._client.regions) + @cached_property + def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithRawResponse: + from .resources.indexing_jobs import AsyncIndexingJobsResourceWithRawResponse + + return AsyncIndexingJobsResourceWithRawResponse(self._client.indexing_jobs) + @cached_property def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawResponse: from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithRawResponse @@ -600,6 +625,12 @@ def regions(self) -> regions.RegionsResourceWithStreamingResponse: return RegionsResourceWithStreamingResponse(self._client.regions) + @cached_property + def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithStreamingResponse: + from .resources.indexing_jobs import IndexingJobsResourceWithStreamingResponse + + return IndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) + @cached_property def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreamingResponse: from .resources.knowledge_bases import KnowledgeBasesResourceWithStreamingResponse @@ -649,6 +680,12 @@ def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse: return AsyncRegionsResourceWithStreamingResponse(self._client.regions) + 
@cached_property + def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithStreamingResponse: + from .resources.indexing_jobs import AsyncIndexingJobsResourceWithStreamingResponse + + return AsyncIndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs) + @cached_property def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStreamingResponse: from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithStreamingResponse diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 6ad0aa32..1763a13e 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -48,6 +48,14 @@ ProvidersResourceWithStreamingResponse, AsyncProvidersResourceWithStreamingResponse, ) +from .indexing_jobs import ( + IndexingJobsResource, + AsyncIndexingJobsResource, + IndexingJobsResourceWithRawResponse, + AsyncIndexingJobsResourceWithRawResponse, + IndexingJobsResourceWithStreamingResponse, + AsyncIndexingJobsResourceWithStreamingResponse, +) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -76,6 +84,12 @@ "AsyncRegionsResourceWithRawResponse", "RegionsResourceWithStreamingResponse", "AsyncRegionsResourceWithStreamingResponse", + "IndexingJobsResource", + "AsyncIndexingJobsResource", + "IndexingJobsResourceWithRawResponse", + "AsyncIndexingJobsResourceWithRawResponse", + "IndexingJobsResourceWithStreamingResponse", + "AsyncIndexingJobsResourceWithStreamingResponse", "KnowledgeBasesResource", "AsyncKnowledgeBasesResource", "KnowledgeBasesResourceWithRawResponse", diff --git a/src/gradientai/resources/knowledge_bases/indexing_jobs.py b/src/gradientai/resources/indexing_jobs.py similarity index 95% rename from src/gradientai/resources/knowledge_bases/indexing_jobs.py rename to src/gradientai/resources/indexing_jobs.py index 39151e41..71c59023 100644 --- a/src/gradientai/resources/knowledge_bases/indexing_jobs.py +++ 
b/src/gradientai/resources/indexing_jobs.py @@ -6,27 +6,23 @@ import httpx -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( +from ..types import indexing_job_list_params, indexing_job_create_params, indexing_job_update_cancel_params +from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from .._utils import maybe_transform, async_maybe_transform +from .._compat import cached_property +from .._resource import SyncAPIResource, AsyncAPIResource +from .._response import ( to_raw_response_wrapper, to_streamed_response_wrapper, async_to_raw_response_wrapper, async_to_streamed_response_wrapper, ) -from ..._base_client import make_request_options -from ...types.knowledge_bases import ( - indexing_job_list_params, - indexing_job_create_params, - indexing_job_update_cancel_params, -) -from ...types.knowledge_bases.indexing_job_list_response import IndexingJobListResponse -from ...types.knowledge_bases.indexing_job_create_response import IndexingJobCreateResponse -from ...types.knowledge_bases.indexing_job_retrieve_response import IndexingJobRetrieveResponse -from ...types.knowledge_bases.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse -from ...types.knowledge_bases.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse +from .._base_client import make_request_options +from ..types.indexing_job_list_response import IndexingJobListResponse +from ..types.indexing_job_create_response import IndexingJobCreateResponse +from ..types.indexing_job_retrieve_response import IndexingJobRetrieveResponse +from ..types.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse +from ..types.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse __all__ = 
["IndexingJobsResource", "AsyncIndexingJobsResource"] diff --git a/src/gradientai/resources/knowledge_bases/__init__.py b/src/gradientai/resources/knowledge_bases/__init__.py index 80d04328..03d143e2 100644 --- a/src/gradientai/resources/knowledge_bases/__init__.py +++ b/src/gradientai/resources/knowledge_bases/__init__.py @@ -8,14 +8,6 @@ DataSourcesResourceWithStreamingResponse, AsyncDataSourcesResourceWithStreamingResponse, ) -from .indexing_jobs import ( - IndexingJobsResource, - AsyncIndexingJobsResource, - IndexingJobsResourceWithRawResponse, - AsyncIndexingJobsResourceWithRawResponse, - IndexingJobsResourceWithStreamingResponse, - AsyncIndexingJobsResourceWithStreamingResponse, -) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -32,12 +24,6 @@ "AsyncDataSourcesResourceWithRawResponse", "DataSourcesResourceWithStreamingResponse", "AsyncDataSourcesResourceWithStreamingResponse", - "IndexingJobsResource", - "AsyncIndexingJobsResource", - "IndexingJobsResourceWithRawResponse", - "AsyncIndexingJobsResourceWithRawResponse", - "IndexingJobsResourceWithStreamingResponse", - "AsyncIndexingJobsResourceWithStreamingResponse", "KnowledgeBasesResource", "AsyncKnowledgeBasesResource", "KnowledgeBasesResourceWithRawResponse", diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py index 28acdd7f..2cab4f7b 100644 --- a/src/gradientai/resources/knowledge_bases/knowledge_bases.py +++ b/src/gradientai/resources/knowledge_bases/knowledge_bases.py @@ -25,14 +25,6 @@ DataSourcesResourceWithStreamingResponse, AsyncDataSourcesResourceWithStreamingResponse, ) -from .indexing_jobs import ( - IndexingJobsResource, - AsyncIndexingJobsResource, - IndexingJobsResourceWithRawResponse, - AsyncIndexingJobsResourceWithRawResponse, - IndexingJobsResourceWithStreamingResponse, - AsyncIndexingJobsResourceWithStreamingResponse, -) from ..._base_client import make_request_options 
from ...types.knowledge_base_list_response import KnowledgeBaseListResponse from ...types.knowledge_base_create_response import KnowledgeBaseCreateResponse @@ -48,10 +40,6 @@ class KnowledgeBasesResource(SyncAPIResource): def data_sources(self) -> DataSourcesResource: return DataSourcesResource(self._client) - @cached_property - def indexing_jobs(self) -> IndexingJobsResource: - return IndexingJobsResource(self._client) - @cached_property def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse: """ @@ -328,10 +316,6 @@ class AsyncKnowledgeBasesResource(AsyncAPIResource): def data_sources(self) -> AsyncDataSourcesResource: return AsyncDataSourcesResource(self._client) - @cached_property - def indexing_jobs(self) -> AsyncIndexingJobsResource: - return AsyncIndexingJobsResource(self._client) - @cached_property def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse: """ @@ -627,10 +611,6 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: def data_sources(self) -> DataSourcesResourceWithRawResponse: return DataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources) - @cached_property - def indexing_jobs(self) -> IndexingJobsResourceWithRawResponse: - return IndexingJobsResourceWithRawResponse(self._knowledge_bases.indexing_jobs) - class AsyncKnowledgeBasesResourceWithRawResponse: def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: @@ -656,10 +636,6 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: def data_sources(self) -> AsyncDataSourcesResourceWithRawResponse: return AsyncDataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources) - @cached_property - def indexing_jobs(self) -> AsyncIndexingJobsResourceWithRawResponse: - return AsyncIndexingJobsResourceWithRawResponse(self._knowledge_bases.indexing_jobs) - class KnowledgeBasesResourceWithStreamingResponse: def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: @@ -685,10 +661,6 @@ 
def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None: def data_sources(self) -> DataSourcesResourceWithStreamingResponse: return DataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources) - @cached_property - def indexing_jobs(self) -> IndexingJobsResourceWithStreamingResponse: - return IndexingJobsResourceWithStreamingResponse(self._knowledge_bases.indexing_jobs) - class AsyncKnowledgeBasesResourceWithStreamingResponse: def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: @@ -713,7 +685,3 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None: @cached_property def data_sources(self) -> AsyncDataSourcesResourceWithStreamingResponse: return AsyncDataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources) - - @cached_property - def indexing_jobs(self) -> AsyncIndexingJobsResourceWithStreamingResponse: - return AsyncIndexingJobsResourceWithStreamingResponse(self._knowledge_bases.indexing_jobs) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index 89a5ec4a..d09aaa2a 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -7,6 +7,7 @@ from .api_agreement import APIAgreement as APIAgreement from .api_workspace import APIWorkspace as APIWorkspace from .api_agent_model import APIAgentModel as APIAgentModel +from .api_indexing_job import APIIndexingJob as APIIndexingJob from .agent_list_params import AgentListParams as AgentListParams from .api_model_version import APIModelVersion as APIModelVersion from .model_list_params import ModelListParams as ModelListParams @@ -25,18 +26,28 @@ from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo +from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from 
.api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo +from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams +from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse +from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams +from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse +from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams +from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse from .region_list_evaluation_metrics_response import ( RegionListEvaluationMetricsResponse as RegionListEvaluationMetricsResponse, ) +from .indexing_job_retrieve_data_sources_response import ( + 
IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, +) diff --git a/src/gradientai/types/knowledge_bases/api_indexing_job.py b/src/gradientai/types/api_indexing_job.py similarity index 96% rename from src/gradientai/types/knowledge_bases/api_indexing_job.py rename to src/gradientai/types/api_indexing_job.py index 2809141c..f24aac94 100644 --- a/src/gradientai/types/knowledge_bases/api_indexing_job.py +++ b/src/gradientai/types/api_indexing_job.py @@ -4,7 +4,7 @@ from datetime import datetime from typing_extensions import Literal -from ..._models import BaseModel +from .._models import BaseModel __all__ = ["APIIndexingJob"] diff --git a/src/gradientai/types/api_knowledge_base.py b/src/gradientai/types/api_knowledge_base.py index 2b0676f0..5b4b6e2c 100644 --- a/src/gradientai/types/api_knowledge_base.py +++ b/src/gradientai/types/api_knowledge_base.py @@ -4,7 +4,7 @@ from datetime import datetime from .._models import BaseModel -from .knowledge_bases.api_indexing_job import APIIndexingJob +from .api_indexing_job import APIIndexingJob __all__ = ["APIKnowledgeBase"] diff --git a/src/gradientai/types/knowledge_bases/indexing_job_create_params.py b/src/gradientai/types/indexing_job_create_params.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_create_params.py rename to src/gradientai/types/indexing_job_create_params.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_create_response.py b/src/gradientai/types/indexing_job_create_response.py similarity index 89% rename from src/gradientai/types/knowledge_bases/indexing_job_create_response.py rename to src/gradientai/types/indexing_job_create_response.py index 835ec60d..839bc83b 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_create_response.py +++ b/src/gradientai/types/indexing_job_create_response.py @@ -2,7 +2,7 @@ from typing import Optional -from ..._models import BaseModel +from .._models import BaseModel from 
.api_indexing_job import APIIndexingJob __all__ = ["IndexingJobCreateResponse"] diff --git a/src/gradientai/types/knowledge_bases/indexing_job_list_params.py b/src/gradientai/types/indexing_job_list_params.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_list_params.py rename to src/gradientai/types/indexing_job_list_params.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_list_response.py b/src/gradientai/types/indexing_job_list_response.py similarity index 77% rename from src/gradientai/types/knowledge_bases/indexing_job_list_response.py rename to src/gradientai/types/indexing_job_list_response.py index 4784c1a1..1379cc55 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_list_response.py +++ b/src/gradientai/types/indexing_job_list_response.py @@ -2,10 +2,10 @@ from typing import List, Optional -from ..._models import BaseModel -from ..agents.api_meta import APIMeta +from .._models import BaseModel +from .agents.api_meta import APIMeta +from .agents.api_links import APILinks from .api_indexing_job import APIIndexingJob -from ..agents.api_links import APILinks __all__ = ["IndexingJobListResponse"] diff --git a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py b/src/gradientai/types/indexing_job_retrieve_data_sources_response.py similarity index 97% rename from src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py rename to src/gradientai/types/indexing_job_retrieve_data_sources_response.py index a9d0c2c0..b178b984 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py +++ b/src/gradientai/types/indexing_job_retrieve_data_sources_response.py @@ -4,7 +4,7 @@ from datetime import datetime from typing_extensions import Literal -from ..._models import BaseModel +from .._models import BaseModel __all__ = ["IndexingJobRetrieveDataSourcesResponse", "IndexedDataSource"] diff --git 
a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py b/src/gradientai/types/indexing_job_retrieve_response.py similarity index 89% rename from src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py rename to src/gradientai/types/indexing_job_retrieve_response.py index 6034bdf1..95f33d7a 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py +++ b/src/gradientai/types/indexing_job_retrieve_response.py @@ -2,7 +2,7 @@ from typing import Optional -from ..._models import BaseModel +from .._models import BaseModel from .api_indexing_job import APIIndexingJob __all__ = ["IndexingJobRetrieveResponse"] diff --git a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py b/src/gradientai/types/indexing_job_update_cancel_params.py similarity index 91% rename from src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py rename to src/gradientai/types/indexing_job_update_cancel_params.py index 9359a42a..4c2848b0 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py +++ b/src/gradientai/types/indexing_job_update_cancel_params.py @@ -4,7 +4,7 @@ from typing_extensions import Annotated, TypedDict -from ..._utils import PropertyInfo +from .._utils import PropertyInfo __all__ = ["IndexingJobUpdateCancelParams"] diff --git a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py b/src/gradientai/types/indexing_job_update_cancel_response.py similarity index 90% rename from src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py rename to src/gradientai/types/indexing_job_update_cancel_response.py index ae4b394f..d50e1865 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py +++ b/src/gradientai/types/indexing_job_update_cancel_response.py @@ -2,7 +2,7 @@ from typing import Optional -from ..._models import BaseModel +from .._models import BaseModel from .api_indexing_job import 
APIIndexingJob __all__ = ["IndexingJobUpdateCancelResponse"] diff --git a/src/gradientai/types/knowledge_bases/__init__.py b/src/gradientai/types/knowledge_bases/__init__.py index 9fc915e5..859c3618 100644 --- a/src/gradientai/types/knowledge_bases/__init__.py +++ b/src/gradientai/types/knowledge_bases/__init__.py @@ -2,27 +2,16 @@ from __future__ import annotations -from .api_indexing_job import APIIndexingJob as APIIndexingJob from .aws_data_source_param import AwsDataSourceParam as AwsDataSourceParam from .api_spaces_data_source import APISpacesDataSource as APISpacesDataSource from .data_source_list_params import DataSourceListParams as DataSourceListParams -from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams from .data_source_create_params import DataSourceCreateParams as DataSourceCreateParams from .data_source_list_response import DataSourceListResponse as DataSourceListResponse -from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams -from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse from .api_file_upload_data_source import APIFileUploadDataSource as APIFileUploadDataSource from .api_web_crawler_data_source import APIWebCrawlerDataSource as APIWebCrawlerDataSource from .data_source_create_response import DataSourceCreateResponse as DataSourceCreateResponse from .data_source_delete_response import DataSourceDeleteResponse as DataSourceDeleteResponse from .api_spaces_data_source_param import APISpacesDataSourceParam as APISpacesDataSourceParam -from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource as APIKnowledgeBaseDataSource -from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse from .api_file_upload_data_source_param import APIFileUploadDataSourceParam as APIFileUploadDataSourceParam from 
.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam as APIWebCrawlerDataSourceParam -from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams -from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse -from .indexing_job_retrieve_data_sources_response import ( - IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse, -) diff --git a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py index ca24d6f0..57080aaa 100644 --- a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py +++ b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py @@ -4,7 +4,7 @@ from datetime import datetime from ..._models import BaseModel -from .api_indexing_job import APIIndexingJob +from ..api_indexing_job import APIIndexingJob from .api_spaces_data_source import APISpacesDataSource from .api_file_upload_data_source import APIFileUploadDataSource from .api_web_crawler_data_source import APIWebCrawlerDataSource diff --git a/tests/api_resources/knowledge_bases/test_indexing_jobs.py b/tests/api_resources/test_indexing_jobs.py similarity index 80% rename from tests/api_resources/knowledge_bases/test_indexing_jobs.py rename to tests/api_resources/test_indexing_jobs.py index 8bf1829f..6a50d9b5 100644 --- a/tests/api_resources/knowledge_bases/test_indexing_jobs.py +++ b/tests/api_resources/test_indexing_jobs.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.knowledge_bases import ( +from gradientai.types import ( IndexingJobListResponse, IndexingJobCreateResponse, IndexingJobRetrieveResponse, @@ -26,13 +26,13 @@ class TestIndexingJobs: @pytest.mark.skip() @parametrize def test_method_create(self, client: GradientAI) -> None: - 
indexing_job = client.knowledge_bases.indexing_jobs.create() + indexing_job = client.indexing_jobs.create() assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: - indexing_job = client.knowledge_bases.indexing_jobs.create( + indexing_job = client.indexing_jobs.create( data_source_uuids=["string"], knowledge_base_uuid="knowledge_base_uuid", ) @@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_create(self, client: GradientAI) -> None: - response = client.knowledge_bases.indexing_jobs.with_raw_response.create() + response = client.indexing_jobs.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -51,7 +51,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: - with client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response: + with client.indexing_jobs.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -63,7 +63,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_retrieve(self, client: GradientAI) -> None: - indexing_job = client.knowledge_bases.indexing_jobs.retrieve( + indexing_job = client.indexing_jobs.retrieve( "uuid", ) assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) @@ -71,7 +71,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = 
client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( + response = client.indexing_jobs.with_raw_response.retrieve( "uuid", ) @@ -83,7 +83,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve( + with client.indexing_jobs.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed @@ -98,20 +98,20 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None: @parametrize def test_path_params_retrieve(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( + client.indexing_jobs.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: - indexing_job = client.knowledge_bases.indexing_jobs.list() + indexing_job = client.indexing_jobs.list() assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: - indexing_job = client.knowledge_bases.indexing_jobs.list( + indexing_job = client.indexing_jobs.list( page=0, per_page=0, ) @@ -120,7 +120,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_list(self, client: GradientAI) -> None: - response = client.knowledge_bases.indexing_jobs.with_raw_response.list() + response = client.indexing_jobs.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -130,7 +130,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_list(self, 
client: GradientAI) -> None: - with client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response: + with client.indexing_jobs.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -142,7 +142,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_retrieve_data_sources(self, client: GradientAI) -> None: - indexing_job = client.knowledge_bases.indexing_jobs.retrieve_data_sources( + indexing_job = client.indexing_jobs.retrieve_data_sources( "indexing_job_uuid", ) assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) @@ -150,7 +150,7 @@ def test_method_retrieve_data_sources(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_retrieve_data_sources(self, client: GradientAI) -> None: - response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( + response = client.indexing_jobs.with_raw_response.retrieve_data_sources( "indexing_job_uuid", ) @@ -162,7 +162,7 @@ def test_raw_response_retrieve_data_sources(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_retrieve_data_sources(self, client: GradientAI) -> None: - with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources( + with client.indexing_jobs.with_streaming_response.retrieve_data_sources( "indexing_job_uuid", ) as response: assert not response.is_closed @@ -177,14 +177,14 @@ def test_streaming_response_retrieve_data_sources(self, client: GradientAI) -> N @parametrize def test_path_params_retrieve_data_sources(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): - client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( + 
client.indexing_jobs.with_raw_response.retrieve_data_sources( "", ) @pytest.mark.skip() @parametrize def test_method_update_cancel(self, client: GradientAI) -> None: - indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( + indexing_job = client.indexing_jobs.update_cancel( path_uuid="uuid", ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -192,7 +192,7 @@ def test_method_update_cancel(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None: - indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( + indexing_job = client.indexing_jobs.update_cancel( path_uuid="uuid", body_uuid="uuid", ) @@ -201,7 +201,7 @@ def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_raw_response_update_cancel(self, client: GradientAI) -> None: - response = client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( + response = client.indexing_jobs.with_raw_response.update_cancel( path_uuid="uuid", ) @@ -213,7 +213,7 @@ def test_raw_response_update_cancel(self, client: GradientAI) -> None: @pytest.mark.skip() @parametrize def test_streaming_response_update_cancel(self, client: GradientAI) -> None: - with client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel( + with client.indexing_jobs.with_streaming_response.update_cancel( path_uuid="uuid", ) as response: assert not response.is_closed @@ -228,7 +228,7 @@ def test_streaming_response_update_cancel(self, client: GradientAI) -> None: @parametrize def test_path_params_update_cancel(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( + client.indexing_jobs.with_raw_response.update_cancel( path_uuid="", ) @@ -241,13 +241,13 @@ class 
TestAsyncIndexingJobs: @pytest.mark.skip() @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.knowledge_bases.indexing_jobs.create() + indexing_job = await async_client.indexing_jobs.create() assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.knowledge_bases.indexing_jobs.create( + indexing_job = await async_client.indexing_jobs.create( data_source_uuids=["string"], knowledge_base_uuid="knowledge_base_uuid", ) @@ -256,7 +256,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @pytest.mark.skip() @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.create() + response = await async_client.indexing_jobs.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -266,7 +266,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response: + async with async_client.indexing_jobs.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -278,7 +278,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @pytest.mark.skip() @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve( + indexing_job = await 
async_client.indexing_jobs.retrieve( "uuid", ) assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"]) @@ -286,7 +286,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( + response = await async_client.indexing_jobs.with_raw_response.retrieve( "uuid", ) @@ -298,7 +298,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve( + async with async_client.indexing_jobs.with_streaming_response.retrieve( "uuid", ) as response: assert not response.is_closed @@ -313,20 +313,20 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) @parametrize async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"): - await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve( + await async_client.indexing_jobs.with_raw_response.retrieve( "", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.knowledge_bases.indexing_jobs.list() + indexing_job = await async_client.indexing_jobs.list() assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"]) @pytest.mark.skip() @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.knowledge_bases.indexing_jobs.list( + indexing_job = await async_client.indexing_jobs.list( page=0, per_page=0, ) @@ -335,7 +335,7 
@@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.list() + response = await async_client.indexing_jobs.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -345,7 +345,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @pytest.mark.skip() @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response: + async with async_client.indexing_jobs.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -357,7 +357,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @pytest.mark.skip() @parametrize async def test_method_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve_data_sources( + indexing_job = await async_client.indexing_jobs.retrieve_data_sources( "indexing_job_uuid", ) assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"]) @@ -365,7 +365,7 @@ async def test_method_retrieve_data_sources(self, async_client: AsyncGradientAI) @pytest.mark.skip() @parametrize async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: - response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( + response = await async_client.indexing_jobs.with_raw_response.retrieve_data_sources( "indexing_job_uuid", ) @@ -377,7 +377,7 @@ async def test_raw_response_retrieve_data_sources(self, async_client: 
AsyncGradi @pytest.mark.skip() @parametrize async def test_streaming_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: - async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources( + async with async_client.indexing_jobs.with_streaming_response.retrieve_data_sources( "indexing_job_uuid", ) as response: assert not response.is_closed @@ -392,14 +392,14 @@ async def test_streaming_response_retrieve_data_sources(self, async_client: Asyn @parametrize async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"): - await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources( + await async_client.indexing_jobs.with_raw_response.retrieve_data_sources( "", ) @pytest.mark.skip() @parametrize async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel( + indexing_job = await async_client.indexing_jobs.update_cancel( path_uuid="uuid", ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -407,7 +407,7 @@ async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None @pytest.mark.skip() @parametrize async def test_method_update_cancel_with_all_params(self, async_client: AsyncGradientAI) -> None: - indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel( + indexing_job = await async_client.indexing_jobs.update_cancel( path_uuid="uuid", body_uuid="uuid", ) @@ -416,7 +416,7 @@ async def test_method_update_cancel_with_all_params(self, async_client: AsyncGra @pytest.mark.skip() @parametrize async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) -> None: - response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( + 
response = await async_client.indexing_jobs.with_raw_response.update_cancel( path_uuid="uuid", ) @@ -428,7 +428,7 @@ async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) - @pytest.mark.skip() @parametrize async def test_streaming_response_update_cancel(self, async_client: AsyncGradientAI) -> None: - async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel( + async with async_client.indexing_jobs.with_streaming_response.update_cancel( path_uuid="uuid", ) as response: assert not response.is_closed @@ -443,6 +443,6 @@ async def test_streaming_response_update_cancel(self, async_client: AsyncGradien @parametrize async def test_path_params_update_cancel(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"): - await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( + await async_client.indexing_jobs.with_raw_response.update_cancel( path_uuid="", ) From 837122a44627d160b975b012040e7f3be951c3ca Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 01:50:34 +0000 Subject: [PATCH 41/41] release: 0.1.0-alpha.4 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 58 +++++++++++++++++++++++++++++++++++ pyproject.toml | 2 +- src/gradientai/_version.py | 2 +- 4 files changed, 61 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index aaf968a1..b56c3d0b 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.3" + ".": "0.1.0-alpha.4" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 2f83e62e..be25824a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,63 @@ # Changelog +## 0.1.0-alpha.4 (2025-06-25) + +Full Changelog: 
[v0.1.0-alpha.3...v0.1.0-alpha.4](https://github.com/digitalocean/gradientai-python/compare/v0.1.0-alpha.3...v0.1.0-alpha.4) + +### Features + +* **api:** update via SDK Studio ([d1ea884](https://github.com/digitalocean/gradientai-python/commit/d1ea884c9be72b3f8804c5ba91bf4f77a3284a6c)) +* **api:** update via SDK Studio ([584f9f1](https://github.com/digitalocean/gradientai-python/commit/584f9f1304b3612eb25f1438041d287592463438)) +* **api:** update via SDK Studio ([7aee6e5](https://github.com/digitalocean/gradientai-python/commit/7aee6e55a0574fc1b6ab73a1777c92e4f3a940ea)) +* **api:** update via SDK Studio ([4212f62](https://github.com/digitalocean/gradientai-python/commit/4212f62b19c44bcb12c02fe396e8c51dd89d3868)) +* **api:** update via SDK Studio ([b16cceb](https://github.com/digitalocean/gradientai-python/commit/b16cceb63edb4253084036b693834bde5da10943)) +* **api:** update via SDK Studio ([34382c0](https://github.com/digitalocean/gradientai-python/commit/34382c06c5d61ac97572cb4977d020e1ede9d4ff)) +* **api:** update via SDK Studio ([c33920a](https://github.com/digitalocean/gradientai-python/commit/c33920aba0dc1f3b8f4f890ce706c86fd452dd6b)) +* **api:** update via SDK Studio ([359c8d8](https://github.com/digitalocean/gradientai-python/commit/359c8d88cec1d60f0beb810b5a0139443d0a3348)) +* **api:** update via SDK Studio ([f27643e](https://github.com/digitalocean/gradientai-python/commit/f27643e1e00f606029be919a7117801facc6e5b7)) +* **api:** update via SDK Studio ([e59144c](https://github.com/digitalocean/gradientai-python/commit/e59144c2d474a4003fd28b8eded08814ffa8d2f3)) +* **api:** update via SDK Studio ([97e1768](https://github.com/digitalocean/gradientai-python/commit/97e17687a348b8ef218c23a06729b6edb1ac5ea9)) +* **api:** update via SDK Studio ([eac41f1](https://github.com/digitalocean/gradientai-python/commit/eac41f12912b8d32ffa23d225f4ca56fa5c72505)) +* **api:** update via SDK Studio 
([1fa7ebb](https://github.com/digitalocean/gradientai-python/commit/1fa7ebb0080db9087b82d29e7197e44dfbb1ebed)) +* **api:** update via SDK Studio ([aa2610a](https://github.com/digitalocean/gradientai-python/commit/aa2610afe7da79429e05bff64b4796de7f525681)) +* **api:** update via SDK Studio ([e5c8d76](https://github.com/digitalocean/gradientai-python/commit/e5c8d768388b16c06fcc2abee71a53dcc8b3e8c5)) +* **api:** update via SDK Studio ([5f700dc](https://github.com/digitalocean/gradientai-python/commit/5f700dc7a4e757015d3bd6f2e82a311114b82d77)) +* **api:** update via SDK Studio ([c042496](https://github.com/digitalocean/gradientai-python/commit/c04249614917198b1eb2324438605d99b719a1cf)) +* **api:** update via SDK Studio ([5ebec81](https://github.com/digitalocean/gradientai-python/commit/5ebec81604a206eba5e75a7e8990bd7711ba8f47)) +* **api:** update via SDK Studio ([cac54a8](https://github.com/digitalocean/gradientai-python/commit/cac54a81a3f22d34b2de0ebfac3c68a982178cad)) +* **api:** update via SDK Studio ([6d62ab0](https://github.com/digitalocean/gradientai-python/commit/6d62ab00594d70df0458a0a401f866af15a9298e)) +* **api:** update via SDK Studio ([0ccc62c](https://github.com/digitalocean/gradientai-python/commit/0ccc62cb8ef387e0aaf6784db25d5f99a587e5da)) +* **api:** update via SDK Studio ([e75adfb](https://github.com/digitalocean/gradientai-python/commit/e75adfbd2d035e57ae110a1d78ea40fb116975e5)) +* **api:** update via SDK Studio ([8bd264b](https://github.com/digitalocean/gradientai-python/commit/8bd264b4b4686ca078bf4eb4b5462f058406df3e)) +* **api:** update via SDK Studio ([6254ccf](https://github.com/digitalocean/gradientai-python/commit/6254ccf45cbe50ca8191c7149824964f5d00d82f)) +* **api:** update via SDK Studio ([8f5761b](https://github.com/digitalocean/gradientai-python/commit/8f5761b1d18fb48ad7488e6f0ad771c077eb7961)) +* **api:** update via SDK Studio ([f853616](https://github.com/digitalocean/gradientai-python/commit/f8536166320d1d5bacf1d10a5edb2f71691dde8b)) +* 
**client:** add support for aiohttp ([494afde](https://github.com/digitalocean/gradientai-python/commit/494afde754f735d1ba95011fc83d23d2410fcfdd)) + + +### Bug Fixes + +* **client:** correctly parse binary response | stream ([abba5be](https://github.com/digitalocean/gradientai-python/commit/abba5be958d03a7e5ce7d1cbf8069c0bcf52ee20)) +* **tests:** fix: tests which call HTTP endpoints directly with the example parameters ([e649dcb](https://github.com/digitalocean/gradientai-python/commit/e649dcb0f9416e9bf568cc9f3480d7e222052391)) + + +### Chores + +* **ci:** enable for pull requests ([b6b3f9e](https://github.com/digitalocean/gradientai-python/commit/b6b3f9ea85918cfc6fc7304b2d21c340d82a0083)) +* **internal:** codegen related update ([4126872](https://github.com/digitalocean/gradientai-python/commit/41268721eafd33fcca5688ca5dff7401f25bdeb2)) +* **internal:** codegen related update ([10b79fb](https://github.com/digitalocean/gradientai-python/commit/10b79fb1d51bcff6ed0d18e5ccd18fd1cd75af9f)) +* **internal:** update conftest.py ([12e2103](https://github.com/digitalocean/gradientai-python/commit/12e210389204ff74f504e1ec3aa5ba99f1b4971c)) +* **readme:** update badges ([6e40dc3](https://github.com/digitalocean/gradientai-python/commit/6e40dc3fa4e33082be7b0bbf65d07e9ae9ac6370)) +* **tests:** add tests for httpx client instantiation & proxies ([7ecf66c](https://github.com/digitalocean/gradientai-python/commit/7ecf66c58a124c153a32055967beacbd1a3bbcf3)) +* **tests:** run tests in parallel ([861dd6b](https://github.com/digitalocean/gradientai-python/commit/861dd6b75956f2c12814ad32b05624d8d8537d52)) +* **tests:** skip some failing tests on the latest python versions ([75b4539](https://github.com/digitalocean/gradientai-python/commit/75b45398c18e75be3389be20479f54521c2e474a)) +* update SDK settings ([ed595b0](https://github.com/digitalocean/gradientai-python/commit/ed595b0a23df125ffba733d7339e771997c3f149)) + + +### Documentation + +* **client:** fix httpx.Timeout documentation 
reference ([5d452d7](https://github.com/digitalocean/gradientai-python/commit/5d452d7245af6c80f47f8395f1c03493dfb53a52)) + ## 0.1.0-alpha.3 (2025-06-12) Full Changelog: [v0.1.0-alpha.2...v0.1.0-alpha.3](https://github.com/digitalocean/genai-python/compare/v0.1.0-alpha.2...v0.1.0-alpha.3) diff --git a/pyproject.toml b/pyproject.toml index 1c89346a..0dd5228b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" -version = "0.1.0-alpha.3" +version = "0.1.0-alpha.4" description = "The official Python library for the GradientAI API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/gradientai/_version.py b/src/gradientai/_version.py index 2cf47e97..4d3df522 100644 --- a/src/gradientai/_version.py +++ b/src/gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "gradientai" -__version__ = "0.1.0-alpha.3" # x-release-please-version +__version__ = "0.1.0-alpha.4" # x-release-please-version