From dcbe442efc67554e60b3b28360a4d9f7dcbb313a Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 01:57:56 +0000
Subject: [PATCH 01/21] feat(api): update via SDK Studio
---
.stats.yml | 2 +-
CONTRIBUTING.md | 2 +-
README.md | 44 ++---
api.md | 180 +++++++++---------
mypy.ini | 2 +-
pyproject.toml | 8 +-
release-please-config.json | 2 +-
scripts/lint | 2 +-
src/{gradientai => do_gradientai}/__init__.py | 4 +-
.../_base_client.py | 2 +-
src/{gradientai => do_gradientai}/_client.py | 0
src/{gradientai => do_gradientai}/_compat.py | 0
.../_constants.py | 0
.../_exceptions.py | 0
src/{gradientai => do_gradientai}/_files.py | 0
src/{gradientai => do_gradientai}/_models.py | 0
src/{gradientai => do_gradientai}/_qs.py | 0
.../_resource.py | 0
.../_response.py | 8 +-
.../_streaming.py | 0
src/{gradientai => do_gradientai}/_types.py | 2 +-
.../_utils/__init__.py | 0
.../_utils/_logs.py | 4 +-
.../_utils/_proxy.py | 0
.../_utils/_reflection.py | 0
.../_utils/_resources_proxy.py | 8 +-
.../_utils/_streams.py | 0
.../_utils/_sync.py | 0
.../_utils/_transform.py | 0
.../_utils/_typing.py | 0
.../_utils/_utils.py | 0
src/{gradientai => do_gradientai}/_version.py | 2 +-
src/do_gradientai/lib/.keep | 4 +
src/{gradientai => do_gradientai}/py.typed | 0
.../resources/__init__.py | 0
.../resources/agents/__init__.py | 0
.../resources/agents/agents.py | 0
.../resources/agents/api_keys.py | 0
.../resources/agents/child_agents.py | 0
.../resources/agents/functions.py | 0
.../resources/agents/knowledge_bases.py | 0
.../resources/agents/versions.py | 0
.../resources/chat/__init__.py | 0
.../resources/chat/chat.py | 0
.../resources/chat/completions.py | 0
.../resources/indexing_jobs.py | 0
.../resources/inference/__init__.py | 0
.../resources/inference/api_keys.py | 0
.../resources/inference/inference.py | 0
.../resources/inference/models.py | 0
.../resources/knowledge_bases/__init__.py | 0
.../resources/knowledge_bases/data_sources.py | 0
.../knowledge_bases/knowledge_bases.py | 0
.../resources/models.py | 0
.../resources/providers/__init__.py | 0
.../resources/providers/anthropic/__init__.py | 0
.../providers/anthropic/anthropic.py | 0
.../resources/providers/anthropic/keys.py | 0
.../resources/providers/openai/__init__.py | 0
.../resources/providers/openai/keys.py | 0
.../resources/providers/openai/openai.py | 0
.../resources/providers/providers.py | 0
.../resources/regions/__init__.py | 0
.../resources/regions/evaluation_datasets.py | 0
.../regions/evaluation_runs/__init__.py | 0
.../evaluation_runs/evaluation_runs.py | 0
.../regions/evaluation_runs/results.py | 0
.../regions/evaluation_test_cases.py | 0
.../resources/regions/regions.py | 0
.../types/__init__.py | 0
.../types/agent_create_params.py | 0
.../types/agent_create_response.py | 0
.../types/agent_delete_response.py | 0
.../types/agent_list_params.py | 0
.../types/agent_list_response.py | 0
.../types/agent_retrieve_response.py | 0
.../types/agent_update_params.py | 0
.../types/agent_update_response.py | 0
.../types/agent_update_status_params.py | 0
.../types/agent_update_status_response.py | 0
.../types/agents/__init__.py | 0
.../types/agents/api_key_create_params.py | 0
.../types/agents/api_key_create_response.py | 0
.../types/agents/api_key_delete_response.py | 0
.../types/agents/api_key_list_params.py | 0
.../types/agents/api_key_list_response.py | 0
.../agents/api_key_regenerate_response.py | 0
.../types/agents/api_key_update_params.py | 0
.../types/agents/api_key_update_response.py | 0
.../agents/api_link_knowledge_base_output.py | 0
.../types/agents/api_links.py | 0
.../types/agents/api_meta.py | 0
.../types/agents/child_agent_add_params.py | 0
.../types/agents/child_agent_add_response.py | 0
.../agents/child_agent_delete_response.py | 0
.../types/agents/child_agent_update_params.py | 0
.../agents/child_agent_update_response.py | 0
.../types/agents/child_agent_view_response.py | 0
.../types/agents/function_create_params.py | 0
.../types/agents/function_create_response.py | 0
.../types/agents/function_delete_response.py | 0
.../types/agents/function_update_params.py | 0
.../types/agents/function_update_response.py | 0
.../agents/knowledge_base_detach_response.py | 0
.../types/agents/version_list_params.py | 0
.../types/agents/version_list_response.py | 0
.../types/agents/version_update_params.py | 0
.../types/agents/version_update_response.py | 0
.../types/api_agent.py | 0
.../types/api_agent_api_key_info.py | 0
.../types/api_agent_model.py | 0
.../types/api_agreement.py | 0
.../types/api_anthropic_api_key_info.py | 0
.../types/api_deployment_visibility.py | 0
.../types/api_evaluation_metric.py | 0
.../types/api_indexing_job.py | 0
.../types/api_knowledge_base.py | 0
.../types/api_model.py | 0
.../types/api_model_version.py | 0
.../types/api_openai_api_key_info.py | 0
.../types/api_retrieval_method.py | 0
.../types/api_workspace.py | 0
.../types/chat/__init__.py | 0
.../chat/chat_completion_token_logprob.py | 0
.../types/chat/completion_create_params.py | 0
.../types/chat/completion_create_response.py | 0
.../types/indexing_job_create_params.py | 0
.../types/indexing_job_create_response.py | 0
.../types/indexing_job_list_params.py | 0
.../types/indexing_job_list_response.py | 0
...xing_job_retrieve_data_sources_response.py | 0
.../types/indexing_job_retrieve_response.py | 0
.../indexing_job_update_cancel_params.py | 0
.../indexing_job_update_cancel_response.py | 0
.../types/inference/__init__.py | 0
.../types/inference/api_key_create_params.py | 0
.../inference/api_key_create_response.py | 0
.../inference/api_key_delete_response.py | 0
.../types/inference/api_key_list_params.py | 0
.../types/inference/api_key_list_response.py | 0
.../types/inference/api_key_update_params.py | 0
.../api_key_update_regenerate_response.py | 0
.../inference/api_key_update_response.py | 0
.../types/inference/api_model_api_key_info.py | 0
.../types/inference/model.py | 0
.../types/inference/model_list_response.py | 0
.../types/knowledge_base_create_params.py | 0
.../types/knowledge_base_create_response.py | 0
.../types/knowledge_base_delete_response.py | 0
.../types/knowledge_base_list_params.py | 0
.../types/knowledge_base_list_response.py | 0
.../types/knowledge_base_retrieve_response.py | 0
.../types/knowledge_base_update_params.py | 0
.../types/knowledge_base_update_response.py | 0
.../types/knowledge_bases/__init__.py | 0
.../api_file_upload_data_source.py | 0
.../api_file_upload_data_source_param.py | 0
.../api_knowledge_base_data_source.py | 0
.../knowledge_bases/api_spaces_data_source.py | 0
.../api_spaces_data_source_param.py | 0
.../api_web_crawler_data_source.py | 0
.../api_web_crawler_data_source_param.py | 0
.../knowledge_bases/aws_data_source_param.py | 0
.../data_source_create_params.py | 0
.../data_source_create_response.py | 0
.../data_source_delete_response.py | 0
.../data_source_list_params.py | 0
.../data_source_list_response.py | 0
.../types/model_list_params.py | 0
.../types/model_list_response.py | 0
.../types/providers/__init__.py | 0
.../types/providers/anthropic/__init__.py | 0
.../providers/anthropic/key_create_params.py | 0
.../anthropic/key_create_response.py | 0
.../anthropic/key_delete_response.py | 0
.../anthropic/key_list_agents_params.py | 0
.../anthropic/key_list_agents_response.py | 0
.../providers/anthropic/key_list_params.py | 0
.../providers/anthropic/key_list_response.py | 0
.../anthropic/key_retrieve_response.py | 0
.../providers/anthropic/key_update_params.py | 0
.../anthropic/key_update_response.py | 0
.../types/providers/openai/__init__.py | 0
.../providers/openai/key_create_params.py | 0
.../providers/openai/key_create_response.py | 0
.../providers/openai/key_delete_response.py | 0
.../types/providers/openai/key_list_params.py | 0
.../providers/openai/key_list_response.py | 0
.../openai/key_retrieve_agents_params.py | 0
.../openai/key_retrieve_agents_response.py | 0
.../providers/openai/key_retrieve_response.py | 0
.../providers/openai/key_update_params.py | 0
.../providers/openai/key_update_response.py | 0
...region_list_evaluation_metrics_response.py | 0
.../types/region_list_params.py | 0
.../types/region_list_response.py | 0
.../types/regions/__init__.py | 0
.../types/regions/api_evaluation_test_case.py | 0
.../types/regions/api_star_metric.py | 0
.../types/regions/api_star_metric_param.py | 0
...reate_file_upload_presigned_urls_params.py | 0
...ate_file_upload_presigned_urls_response.py | 0
.../evaluation_dataset_create_params.py | 0
.../evaluation_dataset_create_response.py | 0
.../regions/evaluation_run_create_params.py | 0
.../regions/evaluation_run_create_response.py | 0
.../evaluation_run_retrieve_response.py | 0
.../types/regions/evaluation_runs/__init__.py | 0
.../api_evaluation_metric_result.py | 0
.../evaluation_runs/api_evaluation_run.py | 0
.../regions/evaluation_runs/api_prompt.py | 0
.../result_retrieve_prompt_response.py | 0
.../result_retrieve_response.py | 0
.../evaluation_test_case_create_params.py | 0
.../evaluation_test_case_create_response.py | 0
...n_test_case_list_evaluation_runs_params.py | 0
...test_case_list_evaluation_runs_response.py | 0
.../evaluation_test_case_list_response.py | 0
.../evaluation_test_case_retrieve_response.py | 0
.../evaluation_test_case_update_params.py | 0
.../evaluation_test_case_update_response.py | 0
tests/api_resources/agents/test_api_keys.py | 4 +-
.../api_resources/agents/test_child_agents.py | 4 +-
tests/api_resources/agents/test_functions.py | 4 +-
.../agents/test_knowledge_bases.py | 4 +-
tests/api_resources/agents/test_versions.py | 4 +-
tests/api_resources/chat/test_completions.py | 4 +-
.../api_resources/inference/test_api_keys.py | 4 +-
tests/api_resources/inference/test_models.py | 4 +-
.../knowledge_bases/test_data_sources.py | 4 +-
.../providers/anthropic/test_keys.py | 4 +-
.../providers/openai/test_keys.py | 4 +-
.../regions/evaluation_runs/test_results.py | 4 +-
.../regions/test_evaluation_datasets.py | 4 +-
.../regions/test_evaluation_runs.py | 4 +-
.../regions/test_evaluation_test_cases.py | 4 +-
tests/api_resources/test_agents.py | 4 +-
tests/api_resources/test_indexing_jobs.py | 4 +-
tests/api_resources/test_knowledge_bases.py | 4 +-
tests/api_resources/test_models.py | 4 +-
tests/api_resources/test_regions.py | 4 +-
tests/conftest.py | 6 +-
tests/test_client.py | 46 ++---
tests/test_deepcopy.py | 2 +-
tests/test_extract_files.py | 4 +-
tests/test_files.py | 2 +-
tests/test_models.py | 6 +-
tests/test_qs.py | 2 +-
tests/test_required_args.py | 2 +-
tests/test_response.py | 14 +-
tests/test_streaming.py | 4 +-
tests/test_transform.py | 8 +-
tests/test_utils/test_proxy.py | 2 +-
tests/test_utils/test_typing.py | 2 +-
tests/utils.py | 8 +-
255 files changed, 234 insertions(+), 230 deletions(-)
rename src/{gradientai => do_gradientai}/__init__.py (95%)
rename src/{gradientai => do_gradientai}/_base_client.py (99%)
rename src/{gradientai => do_gradientai}/_client.py (100%)
rename src/{gradientai => do_gradientai}/_compat.py (100%)
rename src/{gradientai => do_gradientai}/_constants.py (100%)
rename src/{gradientai => do_gradientai}/_exceptions.py (100%)
rename src/{gradientai => do_gradientai}/_files.py (100%)
rename src/{gradientai => do_gradientai}/_models.py (100%)
rename src/{gradientai => do_gradientai}/_qs.py (100%)
rename src/{gradientai => do_gradientai}/_resource.py (100%)
rename src/{gradientai => do_gradientai}/_response.py (99%)
rename src/{gradientai => do_gradientai}/_streaming.py (100%)
rename src/{gradientai => do_gradientai}/_types.py (99%)
rename src/{gradientai => do_gradientai}/_utils/__init__.py (100%)
rename src/{gradientai => do_gradientai}/_utils/_logs.py (75%)
rename src/{gradientai => do_gradientai}/_utils/_proxy.py (100%)
rename src/{gradientai => do_gradientai}/_utils/_reflection.py (100%)
rename src/{gradientai => do_gradientai}/_utils/_resources_proxy.py (50%)
rename src/{gradientai => do_gradientai}/_utils/_streams.py (100%)
rename src/{gradientai => do_gradientai}/_utils/_sync.py (100%)
rename src/{gradientai => do_gradientai}/_utils/_transform.py (100%)
rename src/{gradientai => do_gradientai}/_utils/_typing.py (100%)
rename src/{gradientai => do_gradientai}/_utils/_utils.py (100%)
rename src/{gradientai => do_gradientai}/_version.py (83%)
create mode 100644 src/do_gradientai/lib/.keep
rename src/{gradientai => do_gradientai}/py.typed (100%)
rename src/{gradientai => do_gradientai}/resources/__init__.py (100%)
rename src/{gradientai => do_gradientai}/resources/agents/__init__.py (100%)
rename src/{gradientai => do_gradientai}/resources/agents/agents.py (100%)
rename src/{gradientai => do_gradientai}/resources/agents/api_keys.py (100%)
rename src/{gradientai => do_gradientai}/resources/agents/child_agents.py (100%)
rename src/{gradientai => do_gradientai}/resources/agents/functions.py (100%)
rename src/{gradientai => do_gradientai}/resources/agents/knowledge_bases.py (100%)
rename src/{gradientai => do_gradientai}/resources/agents/versions.py (100%)
rename src/{gradientai => do_gradientai}/resources/chat/__init__.py (100%)
rename src/{gradientai => do_gradientai}/resources/chat/chat.py (100%)
rename src/{gradientai => do_gradientai}/resources/chat/completions.py (100%)
rename src/{gradientai => do_gradientai}/resources/indexing_jobs.py (100%)
rename src/{gradientai => do_gradientai}/resources/inference/__init__.py (100%)
rename src/{gradientai => do_gradientai}/resources/inference/api_keys.py (100%)
rename src/{gradientai => do_gradientai}/resources/inference/inference.py (100%)
rename src/{gradientai => do_gradientai}/resources/inference/models.py (100%)
rename src/{gradientai => do_gradientai}/resources/knowledge_bases/__init__.py (100%)
rename src/{gradientai => do_gradientai}/resources/knowledge_bases/data_sources.py (100%)
rename src/{gradientai => do_gradientai}/resources/knowledge_bases/knowledge_bases.py (100%)
rename src/{gradientai => do_gradientai}/resources/models.py (100%)
rename src/{gradientai => do_gradientai}/resources/providers/__init__.py (100%)
rename src/{gradientai => do_gradientai}/resources/providers/anthropic/__init__.py (100%)
rename src/{gradientai => do_gradientai}/resources/providers/anthropic/anthropic.py (100%)
rename src/{gradientai => do_gradientai}/resources/providers/anthropic/keys.py (100%)
rename src/{gradientai => do_gradientai}/resources/providers/openai/__init__.py (100%)
rename src/{gradientai => do_gradientai}/resources/providers/openai/keys.py (100%)
rename src/{gradientai => do_gradientai}/resources/providers/openai/openai.py (100%)
rename src/{gradientai => do_gradientai}/resources/providers/providers.py (100%)
rename src/{gradientai => do_gradientai}/resources/regions/__init__.py (100%)
rename src/{gradientai => do_gradientai}/resources/regions/evaluation_datasets.py (100%)
rename src/{gradientai => do_gradientai}/resources/regions/evaluation_runs/__init__.py (100%)
rename src/{gradientai => do_gradientai}/resources/regions/evaluation_runs/evaluation_runs.py (100%)
rename src/{gradientai => do_gradientai}/resources/regions/evaluation_runs/results.py (100%)
rename src/{gradientai => do_gradientai}/resources/regions/evaluation_test_cases.py (100%)
rename src/{gradientai => do_gradientai}/resources/regions/regions.py (100%)
rename src/{gradientai => do_gradientai}/types/__init__.py (100%)
rename src/{gradientai => do_gradientai}/types/agent_create_params.py (100%)
rename src/{gradientai => do_gradientai}/types/agent_create_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agent_delete_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agent_list_params.py (100%)
rename src/{gradientai => do_gradientai}/types/agent_list_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agent_retrieve_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agent_update_params.py (100%)
rename src/{gradientai => do_gradientai}/types/agent_update_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agent_update_status_params.py (100%)
rename src/{gradientai => do_gradientai}/types/agent_update_status_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/__init__.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/api_key_create_params.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/api_key_create_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/api_key_delete_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/api_key_list_params.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/api_key_list_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/api_key_regenerate_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/api_key_update_params.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/api_key_update_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/api_link_knowledge_base_output.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/api_links.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/api_meta.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/child_agent_add_params.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/child_agent_add_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/child_agent_delete_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/child_agent_update_params.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/child_agent_update_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/child_agent_view_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/function_create_params.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/function_create_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/function_delete_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/function_update_params.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/function_update_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/knowledge_base_detach_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/version_list_params.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/version_list_response.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/version_update_params.py (100%)
rename src/{gradientai => do_gradientai}/types/agents/version_update_response.py (100%)
rename src/{gradientai => do_gradientai}/types/api_agent.py (100%)
rename src/{gradientai => do_gradientai}/types/api_agent_api_key_info.py (100%)
rename src/{gradientai => do_gradientai}/types/api_agent_model.py (100%)
rename src/{gradientai => do_gradientai}/types/api_agreement.py (100%)
rename src/{gradientai => do_gradientai}/types/api_anthropic_api_key_info.py (100%)
rename src/{gradientai => do_gradientai}/types/api_deployment_visibility.py (100%)
rename src/{gradientai => do_gradientai}/types/api_evaluation_metric.py (100%)
rename src/{gradientai => do_gradientai}/types/api_indexing_job.py (100%)
rename src/{gradientai => do_gradientai}/types/api_knowledge_base.py (100%)
rename src/{gradientai => do_gradientai}/types/api_model.py (100%)
rename src/{gradientai => do_gradientai}/types/api_model_version.py (100%)
rename src/{gradientai => do_gradientai}/types/api_openai_api_key_info.py (100%)
rename src/{gradientai => do_gradientai}/types/api_retrieval_method.py (100%)
rename src/{gradientai => do_gradientai}/types/api_workspace.py (100%)
rename src/{gradientai => do_gradientai}/types/chat/__init__.py (100%)
rename src/{gradientai => do_gradientai}/types/chat/chat_completion_token_logprob.py (100%)
rename src/{gradientai => do_gradientai}/types/chat/completion_create_params.py (100%)
rename src/{gradientai => do_gradientai}/types/chat/completion_create_response.py (100%)
rename src/{gradientai => do_gradientai}/types/indexing_job_create_params.py (100%)
rename src/{gradientai => do_gradientai}/types/indexing_job_create_response.py (100%)
rename src/{gradientai => do_gradientai}/types/indexing_job_list_params.py (100%)
rename src/{gradientai => do_gradientai}/types/indexing_job_list_response.py (100%)
rename src/{gradientai => do_gradientai}/types/indexing_job_retrieve_data_sources_response.py (100%)
rename src/{gradientai => do_gradientai}/types/indexing_job_retrieve_response.py (100%)
rename src/{gradientai => do_gradientai}/types/indexing_job_update_cancel_params.py (100%)
rename src/{gradientai => do_gradientai}/types/indexing_job_update_cancel_response.py (100%)
rename src/{gradientai => do_gradientai}/types/inference/__init__.py (100%)
rename src/{gradientai => do_gradientai}/types/inference/api_key_create_params.py (100%)
rename src/{gradientai => do_gradientai}/types/inference/api_key_create_response.py (100%)
rename src/{gradientai => do_gradientai}/types/inference/api_key_delete_response.py (100%)
rename src/{gradientai => do_gradientai}/types/inference/api_key_list_params.py (100%)
rename src/{gradientai => do_gradientai}/types/inference/api_key_list_response.py (100%)
rename src/{gradientai => do_gradientai}/types/inference/api_key_update_params.py (100%)
rename src/{gradientai => do_gradientai}/types/inference/api_key_update_regenerate_response.py (100%)
rename src/{gradientai => do_gradientai}/types/inference/api_key_update_response.py (100%)
rename src/{gradientai => do_gradientai}/types/inference/api_model_api_key_info.py (100%)
rename src/{gradientai => do_gradientai}/types/inference/model.py (100%)
rename src/{gradientai => do_gradientai}/types/inference/model_list_response.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_base_create_params.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_base_create_response.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_base_delete_response.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_base_list_params.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_base_list_response.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_base_retrieve_response.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_base_update_params.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_base_update_response.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_bases/__init__.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_file_upload_data_source.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_file_upload_data_source_param.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_knowledge_base_data_source.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_spaces_data_source.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_spaces_data_source_param.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_web_crawler_data_source.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_web_crawler_data_source_param.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_bases/aws_data_source_param.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_create_params.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_create_response.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_delete_response.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_list_params.py (100%)
rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_list_response.py (100%)
rename src/{gradientai => do_gradientai}/types/model_list_params.py (100%)
rename src/{gradientai => do_gradientai}/types/model_list_response.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/__init__.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/anthropic/__init__.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_create_params.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_create_response.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_delete_response.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_list_agents_params.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_list_agents_response.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_list_params.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_list_response.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_retrieve_response.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_update_params.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/anthropic/key_update_response.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/openai/__init__.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/openai/key_create_params.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/openai/key_create_response.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/openai/key_delete_response.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/openai/key_list_params.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/openai/key_list_response.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/openai/key_retrieve_agents_params.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/openai/key_retrieve_agents_response.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/openai/key_retrieve_response.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/openai/key_update_params.py (100%)
rename src/{gradientai => do_gradientai}/types/providers/openai/key_update_response.py (100%)
rename src/{gradientai => do_gradientai}/types/region_list_evaluation_metrics_response.py (100%)
rename src/{gradientai => do_gradientai}/types/region_list_params.py (100%)
rename src/{gradientai => do_gradientai}/types/region_list_response.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/__init__.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/api_evaluation_test_case.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/api_star_metric.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/api_star_metric_param.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_dataset_create_params.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_dataset_create_response.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_run_create_params.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_run_create_response.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_run_retrieve_response.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_runs/__init__.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_runs/api_evaluation_metric_result.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_runs/api_evaluation_run.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_runs/api_prompt.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_runs/result_retrieve_prompt_response.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_runs/result_retrieve_response.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_test_case_create_params.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_test_case_create_response.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_test_case_list_evaluation_runs_params.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_test_case_list_evaluation_runs_response.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_test_case_list_response.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_test_case_retrieve_response.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_test_case_update_params.py (100%)
rename src/{gradientai => do_gradientai}/types/regions/evaluation_test_case_update_response.py (100%)
diff --git a/.stats.yml b/.stats.yml
index f0863f5f..ed791f90 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 70
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: 211ece2994c6ac52f84f78ee56c1097a
+config_hash: 0c94579072c21854f9e042dfaac74e1d
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 086907ef..4f59c83a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -36,7 +36,7 @@ $ pip install -r requirements-dev.lock
Most of the SDK is generated code. Modifications to code will be persisted between generations, but may
result in merge conflicts between manual patches and changes from the generator. The generator will never
-modify the contents of the `src/gradientai/lib/` and `examples/` directories.
+modify the contents of the `src/do_gradientai/lib/` and `examples/` directories.
## Adding and running examples
diff --git a/README.md b/README.md
index 6f214027..15ee41ab 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@ The full API of this library can be found in [api.md](api.md).
```python
import os
-from gradientai import GradientAI
+from do_gradientai import GradientAI
client = GradientAI(
api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted
@@ -49,7 +49,7 @@ Simply import `AsyncGradientAI` instead of `GradientAI` and use `await` with eac
```python
import os
import asyncio
-from gradientai import AsyncGradientAI
+from do_gradientai import AsyncGradientAI
client = AsyncGradientAI(
api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted
@@ -84,8 +84,8 @@ Then you can enable it by instantiating the client with `http_client=DefaultAioH
```python
import os
import asyncio
-from gradientai import DefaultAioHttpClient
-from gradientai import AsyncGradientAI
+from do_gradientai import DefaultAioHttpClient
+from do_gradientai import AsyncGradientAI
async def main() -> None:
@@ -116,7 +116,7 @@ Typed requests and responses provide autocomplete and documentation within your
Nested parameters are dictionaries, typed using `TypedDict`, for example:
```python
-from gradientai import GradientAI
+from do_gradientai import GradientAI
client = GradientAI()
@@ -128,16 +128,16 @@ print(evaluation_test_case.star_metric)
## Handling errors
-When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `gradientai.APIConnectionError` is raised.
+When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `do_gradientai.APIConnectionError` is raised.
When the API returns a non-success status code (that is, 4xx or 5xx
-response), a subclass of `gradientai.APIStatusError` is raised, containing `status_code` and `response` properties.
+response), a subclass of `do_gradientai.APIStatusError` is raised, containing `status_code` and `response` properties.
-All errors inherit from `gradientai.APIError`.
+All errors inherit from `do_gradientai.APIError`.
```python
-import gradientai
-from gradientai import GradientAI
+import do_gradientai
+from do_gradientai import GradientAI
client = GradientAI()
@@ -145,12 +145,12 @@ try:
client.agents.versions.list(
uuid="REPLACE_ME",
)
-except gradientai.APIConnectionError as e:
+except do_gradientai.APIConnectionError as e:
print("The server could not be reached")
print(e.__cause__) # an underlying Exception, likely raised within httpx.
-except gradientai.RateLimitError as e:
+except do_gradientai.RateLimitError as e:
print("A 429 status code was received; we should back off a bit.")
-except gradientai.APIStatusError as e:
+except do_gradientai.APIStatusError as e:
print("Another non-200-range status code was received")
print(e.status_code)
print(e.response)
@@ -178,7 +178,7 @@ Connection errors (for example, due to a network connectivity problem), 408 Requ
You can use the `max_retries` option to configure or disable retry settings:
```python
-from gradientai import GradientAI
+from do_gradientai import GradientAI
# Configure the default for all requests:
client = GradientAI(
@@ -198,7 +198,7 @@ By default requests time out after 1 minute. You can configure this with a `time
which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object:
```python
-from gradientai import GradientAI
+from do_gradientai import GradientAI
# Configure the default for all requests:
client = GradientAI(
@@ -252,7 +252,7 @@ if response.my_field is None:
The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g.,
```py
-from gradientai import GradientAI
+from do_gradientai import GradientAI
client = GradientAI()
response = client.agents.versions.with_raw_response.list(
@@ -264,9 +264,9 @@ version = response.parse() # get the object that `agents.versions.list()` would
print(version.agent_versions)
```
-These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) object.
+These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/do_gradientai/_response.py) object.
-The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
+The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/do_gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content.
#### `.with_streaming_response`
@@ -330,7 +330,7 @@ You can directly override the [httpx client](https://www.python-httpx.org/api/#c
```python
import httpx
-from gradientai import GradientAI, DefaultHttpxClient
+from do_gradientai import GradientAI, DefaultHttpxClient
client = GradientAI(
# Or use the `GRADIENT_AI_BASE_URL` env var
@@ -353,7 +353,7 @@ client.with_options(http_client=DefaultHttpxClient(...))
By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting.
```py
-from gradientai import GradientAI
+from do_gradientai import GradientAI
with GradientAI() as client:
# make requests here
@@ -381,8 +381,8 @@ If you've upgraded to the latest version but aren't seeing any new features you
You can determine the version that is being used at runtime with:
```py
-import gradientai
-print(gradientai.__version__)
+import do_gradientai
+print(do_gradientai.__version__)
```
## Requirements
diff --git a/api.md b/api.md
index 970f6951..a10c03ef 100644
--- a/api.md
+++ b/api.md
@@ -3,7 +3,7 @@
Types:
```python
-from gradientai.types import (
+from do_gradientai.types import (
APIAgent,
APIAgentAPIKeyInfo,
APIAgentModel,
@@ -23,19 +23,19 @@ from gradientai.types import (
Methods:
-- client.agents.create(\*\*params) -> AgentCreateResponse
-- client.agents.retrieve(uuid) -> AgentRetrieveResponse
-- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse
-- client.agents.list(\*\*params) -> AgentListResponse
-- client.agents.delete(uuid) -> AgentDeleteResponse
-- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse
+- client.agents.create(\*\*params) -> AgentCreateResponse
+- client.agents.retrieve(uuid) -> AgentRetrieveResponse
+- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse
+- client.agents.list(\*\*params) -> AgentListResponse
+- client.agents.delete(uuid) -> AgentDeleteResponse
+- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse
## APIKeys
Types:
```python
-from gradientai.types.agents import (
+from do_gradientai.types.agents import (
APIKeyCreateResponse,
APIKeyUpdateResponse,
APIKeyListResponse,
@@ -46,18 +46,18 @@ from gradientai.types.agents import (
Methods:
-- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse
-- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse
-- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse
-- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse
-- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse
+- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse
+- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse
+- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse
+- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse
+- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse
## Functions
Types:
```python
-from gradientai.types.agents import (
+from do_gradientai.types.agents import (
FunctionCreateResponse,
FunctionUpdateResponse,
FunctionDeleteResponse,
@@ -66,43 +66,43 @@ from gradientai.types.agents import (
Methods:
-- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse
-- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse
-- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse
+- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse
+- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse
+- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse
## Versions
Types:
```python
-from gradientai.types.agents import APILinks, APIMeta, VersionUpdateResponse, VersionListResponse
+from do_gradientai.types.agents import APILinks, APIMeta, VersionUpdateResponse, VersionListResponse
```
Methods:
-- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse
-- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse
+- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse
+- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse
## KnowledgeBases
Types:
```python
-from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
+from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
```
Methods:
-- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput
-- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput
-- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse
+- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput
+- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput
+- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse
## ChildAgents
Types:
```python
-from gradientai.types.agents import (
+from do_gradientai.types.agents import (
ChildAgentUpdateResponse,
ChildAgentDeleteResponse,
ChildAgentAddResponse,
@@ -112,10 +112,10 @@ from gradientai.types.agents import (
Methods:
-- client.agents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse
-- client.agents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse
-- client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse
-- client.agents.child_agents.view(uuid) -> ChildAgentViewResponse
+- client.agents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse
+- client.agents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse
+- client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse
+- client.agents.child_agents.view(uuid) -> ChildAgentViewResponse
# Providers
@@ -126,7 +126,7 @@ Methods:
Types:
```python
-from gradientai.types.providers.anthropic import (
+from do_gradientai.types.providers.anthropic import (
KeyCreateResponse,
KeyRetrieveResponse,
KeyUpdateResponse,
@@ -138,12 +138,12 @@ from gradientai.types.providers.anthropic import (
Methods:
-- client.providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse
-- client.providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
-- client.providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
-- client.providers.anthropic.keys.list(\*\*params) -> KeyListResponse
-- client.providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse
-- client.providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
+- client.providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse
+- client.providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
+- client.providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
+- client.providers.anthropic.keys.list(\*\*params) -> KeyListResponse
+- client.providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse
+- client.providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
## OpenAI
@@ -152,7 +152,7 @@ Methods:
Types:
```python
-from gradientai.types.providers.openai import (
+from do_gradientai.types.providers.openai import (
KeyCreateResponse,
KeyRetrieveResponse,
KeyUpdateResponse,
@@ -164,19 +164,19 @@ from gradientai.types.providers.openai import (
Methods:
-- client.providers.openai.keys.create(\*\*params) -> KeyCreateResponse
-- client.providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
-- client.providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
-- client.providers.openai.keys.list(\*\*params) -> KeyListResponse
-- client.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse
-- client.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse
+- client.providers.openai.keys.create(\*\*params) -> KeyCreateResponse
+- client.providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
+- client.providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
+- client.providers.openai.keys.list(\*\*params) -> KeyListResponse
+- client.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse
+- client.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse
# Regions
Types:
```python
-from gradientai.types import (
+from do_gradientai.types import (
APIEvaluationMetric,
RegionListResponse,
RegionListEvaluationMetricsResponse,
@@ -185,28 +185,28 @@ from gradientai.types import (
Methods:
-- client.regions.list(\*\*params) -> RegionListResponse
-- client.regions.list_evaluation_metrics() -> RegionListEvaluationMetricsResponse
+- client.regions.list(\*\*params) -> RegionListResponse
+- client.regions.list_evaluation_metrics() -> RegionListEvaluationMetricsResponse
## EvaluationRuns
Types:
```python
-from gradientai.types.regions import EvaluationRunCreateResponse, EvaluationRunRetrieveResponse
+from do_gradientai.types.regions import EvaluationRunCreateResponse, EvaluationRunRetrieveResponse
```
Methods:
-- client.regions.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse
-- client.regions.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse
+- client.regions.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse
+- client.regions.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse
### Results
Types:
```python
-from gradientai.types.regions.evaluation_runs import (
+from do_gradientai.types.regions.evaluation_runs import (
APIEvaluationMetricResult,
APIEvaluationRun,
APIPrompt,
@@ -217,15 +217,15 @@ from gradientai.types.regions.evaluation_runs import (
Methods:
-- client.regions.evaluation_runs.results.retrieve(evaluation_run_uuid) -> ResultRetrieveResponse
-- client.regions.evaluation_runs.results.retrieve_prompt(prompt_id, \*, evaluation_run_uuid) -> ResultRetrievePromptResponse
+- client.regions.evaluation_runs.results.retrieve(evaluation_run_uuid) -> ResultRetrieveResponse
+- client.regions.evaluation_runs.results.retrieve_prompt(prompt_id, \*, evaluation_run_uuid) -> ResultRetrievePromptResponse
## EvaluationTestCases
Types:
```python
-from gradientai.types.regions import (
+from do_gradientai.types.regions import (
APIEvaluationTestCase,
APIStarMetric,
EvaluationTestCaseCreateResponse,
@@ -238,18 +238,18 @@ from gradientai.types.regions import (
Methods:
-- client.regions.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse
-- client.regions.evaluation_test_cases.retrieve(test_case_uuid) -> EvaluationTestCaseRetrieveResponse
-- client.regions.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse
-- client.regions.evaluation_test_cases.list() -> EvaluationTestCaseListResponse
-- client.regions.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse
+- client.regions.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse
+- client.regions.evaluation_test_cases.retrieve(test_case_uuid) -> EvaluationTestCaseRetrieveResponse
+- client.regions.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse
+- client.regions.evaluation_test_cases.list() -> EvaluationTestCaseListResponse
+- client.regions.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse
## EvaluationDatasets
Types:
```python
-from gradientai.types.regions import (
+from do_gradientai.types.regions import (
EvaluationDatasetCreateResponse,
EvaluationDatasetCreateFileUploadPresignedURLsResponse,
)
@@ -257,15 +257,15 @@ from gradientai.types.regions import (
Methods:
-- client.regions.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse
-- client.regions.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse
+- client.regions.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse
+- client.regions.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse
# IndexingJobs
Types:
```python
-from gradientai.types import (
+from do_gradientai.types import (
APIIndexingJob,
IndexingJobCreateResponse,
IndexingJobRetrieveResponse,
@@ -277,18 +277,18 @@ from gradientai.types import (
Methods:
-- client.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse
-- client.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse
-- client.indexing_jobs.list(\*\*params) -> IndexingJobListResponse
-- client.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse
-- client.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse
+- client.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse
+- client.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse
+- client.indexing_jobs.list(\*\*params) -> IndexingJobListResponse
+- client.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse
+- client.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse
# KnowledgeBases
Types:
```python
-from gradientai.types import (
+from do_gradientai.types import (
APIKnowledgeBase,
KnowledgeBaseCreateResponse,
KnowledgeBaseRetrieveResponse,
@@ -300,18 +300,18 @@ from gradientai.types import (
Methods:
-- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse
-- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse
-- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse
-- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse
-- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse
+- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse
+- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse
+- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse
+- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse
+- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse
## DataSources
Types:
```python
-from gradientai.types.knowledge_bases import (
+from do_gradientai.types.knowledge_bases import (
APIFileUploadDataSource,
APIKnowledgeBaseDataSource,
APISpacesDataSource,
@@ -325,9 +325,9 @@ from gradientai.types.knowledge_bases import (
Methods:
-- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse
-- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse
-- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse
+- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse
+- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse
+- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse
# Chat
@@ -336,12 +336,12 @@ Methods:
Types:
```python
-from gradientai.types.chat import ChatCompletionTokenLogprob, CompletionCreateResponse
+from do_gradientai.types.chat import ChatCompletionTokenLogprob, CompletionCreateResponse
```
Methods:
-- client.chat.completions.create(\*\*params) -> CompletionCreateResponse
+- client.chat.completions.create(\*\*params) -> CompletionCreateResponse
# Inference
@@ -350,7 +350,7 @@ Methods:
Types:
```python
-from gradientai.types.inference import (
+from do_gradientai.types.inference import (
APIModelAPIKeyInfo,
APIKeyCreateResponse,
APIKeyUpdateResponse,
@@ -362,33 +362,33 @@ from gradientai.types.inference import (
Methods:
-- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse
-- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse
-- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse
-- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse
-- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse
+- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse
+- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse
+- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse
+- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse
+- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse
## Models
Types:
```python
-from gradientai.types.inference import Model, ModelListResponse
+from do_gradientai.types.inference import Model, ModelListResponse
```
Methods:
-- client.inference.models.retrieve(model) -> Model
-- client.inference.models.list() -> ModelListResponse
+- client.inference.models.retrieve(model) -> Model
+- client.inference.models.list() -> ModelListResponse
# Models
Types:
```python
-from gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelListResponse
+from do_gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelListResponse
```
Methods:
-- client.models.list(\*\*params) -> ModelListResponse
+- client.models.list(\*\*params) -> ModelListResponse
diff --git a/mypy.ini b/mypy.ini
index 748d8234..82b0c891 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -8,7 +8,7 @@ show_error_codes = True
#
# We also exclude our `tests` as mypy doesn't always infer
# types correctly and Pyright will still catch any type errors.
-exclude = ^(src/gradientai/_files\.py|_dev/.*\.py|tests/.*)$
+exclude = ^(src/do_gradientai/_files\.py|_dev/.*\.py|tests/.*)$
strict_equality = True
implicit_reexport = True
diff --git a/pyproject.toml b/pyproject.toml
index 792cf5bd..c7b50822 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -78,14 +78,14 @@ format = { chain = [
"check:ruff" = "ruff check ."
"fix:ruff" = "ruff check --fix ."
-"check:importable" = "python -c 'import gradientai'"
+"check:importable" = "python -c 'import do_gradientai'"
typecheck = { chain = [
"typecheck:pyright",
"typecheck:mypy"
]}
"typecheck:pyright" = "pyright"
-"typecheck:verify-types" = "pyright --verifytypes gradientai --ignoreexternal"
+"typecheck:verify-types" = "pyright --verifytypes do_gradientai --ignoreexternal"
"typecheck:mypy" = "mypy ."
[build-system]
@@ -98,7 +98,7 @@ include = [
]
[tool.hatch.build.targets.wheel]
-packages = ["src/gradientai"]
+packages = ["src/do_gradientai"]
[tool.hatch.build.targets.sdist]
# Basically everything except hidden files/directories (such as .github, .devcontainers, .python-version, etc)
@@ -201,7 +201,7 @@ length-sort = true
length-sort-straight = true
combine-as-imports = true
extra-standard-library = ["typing_extensions"]
-known-first-party = ["gradientai", "tests"]
+known-first-party = ["do_gradientai", "tests"]
[tool.ruff.lint.per-file-ignores]
"bin/**.py" = ["T201", "T203"]
diff --git a/release-please-config.json b/release-please-config.json
index 2ff9a58c..a320c1a8 100644
--- a/release-please-config.json
+++ b/release-please-config.json
@@ -61,6 +61,6 @@
],
"release-type": "python",
"extra-files": [
- "src/gradientai/_version.py"
+ "src/do_gradientai/_version.py"
]
}
\ No newline at end of file
diff --git a/scripts/lint b/scripts/lint
index 37b38f6f..e46e909b 100755
--- a/scripts/lint
+++ b/scripts/lint
@@ -8,4 +8,4 @@ echo "==> Running lints"
rye run lint
echo "==> Making sure it imports"
-rye run python -c 'import gradientai'
+rye run python -c 'import do_gradientai'
diff --git a/src/gradientai/__init__.py b/src/do_gradientai/__init__.py
similarity index 95%
rename from src/gradientai/__init__.py
rename to src/do_gradientai/__init__.py
index 3316fe47..41b943b2 100644
--- a/src/gradientai/__init__.py
+++ b/src/do_gradientai/__init__.py
@@ -89,12 +89,12 @@
# Update the __module__ attribute for exported symbols so that
# error messages point to this module instead of the module
# it was originally defined in, e.g.
-# gradientai._exceptions.NotFoundError -> gradientai.NotFoundError
+# do_gradientai._exceptions.NotFoundError -> do_gradientai.NotFoundError
__locals = locals()
for __name in __all__:
if not __name.startswith("__"):
try:
- __locals[__name].__module__ = "gradientai"
+ __locals[__name].__module__ = "do_gradientai"
except (TypeError, AttributeError):
# Some of our exported symbols are builtins which we can't set attributes for.
pass
diff --git a/src/gradientai/_base_client.py b/src/do_gradientai/_base_client.py
similarity index 99%
rename from src/gradientai/_base_client.py
rename to src/do_gradientai/_base_client.py
index 6dce600b..30108c9d 100644
--- a/src/gradientai/_base_client.py
+++ b/src/do_gradientai/_base_client.py
@@ -389,7 +389,7 @@ def __init__(
if max_retries is None: # pyright: ignore[reportUnnecessaryComparison]
raise TypeError(
- "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `gradientai.DEFAULT_MAX_RETRIES`"
+ "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `do_gradientai.DEFAULT_MAX_RETRIES`"
)
def _enforce_trailing_slash(self, url: URL) -> URL:
diff --git a/src/gradientai/_client.py b/src/do_gradientai/_client.py
similarity index 100%
rename from src/gradientai/_client.py
rename to src/do_gradientai/_client.py
diff --git a/src/gradientai/_compat.py b/src/do_gradientai/_compat.py
similarity index 100%
rename from src/gradientai/_compat.py
rename to src/do_gradientai/_compat.py
diff --git a/src/gradientai/_constants.py b/src/do_gradientai/_constants.py
similarity index 100%
rename from src/gradientai/_constants.py
rename to src/do_gradientai/_constants.py
diff --git a/src/gradientai/_exceptions.py b/src/do_gradientai/_exceptions.py
similarity index 100%
rename from src/gradientai/_exceptions.py
rename to src/do_gradientai/_exceptions.py
diff --git a/src/gradientai/_files.py b/src/do_gradientai/_files.py
similarity index 100%
rename from src/gradientai/_files.py
rename to src/do_gradientai/_files.py
diff --git a/src/gradientai/_models.py b/src/do_gradientai/_models.py
similarity index 100%
rename from src/gradientai/_models.py
rename to src/do_gradientai/_models.py
diff --git a/src/gradientai/_qs.py b/src/do_gradientai/_qs.py
similarity index 100%
rename from src/gradientai/_qs.py
rename to src/do_gradientai/_qs.py
diff --git a/src/gradientai/_resource.py b/src/do_gradientai/_resource.py
similarity index 100%
rename from src/gradientai/_resource.py
rename to src/do_gradientai/_resource.py
diff --git a/src/gradientai/_response.py b/src/do_gradientai/_response.py
similarity index 99%
rename from src/gradientai/_response.py
rename to src/do_gradientai/_response.py
index 2037e4ca..8ca43971 100644
--- a/src/gradientai/_response.py
+++ b/src/do_gradientai/_response.py
@@ -218,7 +218,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
and issubclass(origin, pydantic.BaseModel)
):
raise TypeError(
- "Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`"
+ "Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`"
)
if (
@@ -285,7 +285,7 @@ def parse(self, *, to: type[_T] | None = None) -> R | _T:
the `to` argument, e.g.
```py
- from gradientai import BaseModel
+ from do_gradientai import BaseModel
class MyModel(BaseModel):
@@ -387,7 +387,7 @@ async def parse(self, *, to: type[_T] | None = None) -> R | _T:
the `to` argument, e.g.
```py
- from gradientai import BaseModel
+ from do_gradientai import BaseModel
class MyModel(BaseModel):
@@ -558,7 +558,7 @@ async def stream_to_file(
class MissingStreamClassError(TypeError):
def __init__(self) -> None:
super().__init__(
- "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `gradientai._streaming` for reference",
+ "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `do_gradientai._streaming` for reference",
)
diff --git a/src/gradientai/_streaming.py b/src/do_gradientai/_streaming.py
similarity index 100%
rename from src/gradientai/_streaming.py
rename to src/do_gradientai/_streaming.py
diff --git a/src/gradientai/_types.py b/src/do_gradientai/_types.py
similarity index 99%
rename from src/gradientai/_types.py
rename to src/do_gradientai/_types.py
index 1bac876d..c356c700 100644
--- a/src/gradientai/_types.py
+++ b/src/do_gradientai/_types.py
@@ -81,7 +81,7 @@
# This unfortunately means that you will either have
# to import this type and pass it explicitly:
#
-# from gradientai import NoneType
+# from do_gradientai import NoneType
# client.get('/foo', cast_to=NoneType)
#
# or build it yourself:
diff --git a/src/gradientai/_utils/__init__.py b/src/do_gradientai/_utils/__init__.py
similarity index 100%
rename from src/gradientai/_utils/__init__.py
rename to src/do_gradientai/_utils/__init__.py
diff --git a/src/gradientai/_utils/_logs.py b/src/do_gradientai/_utils/_logs.py
similarity index 75%
rename from src/gradientai/_utils/_logs.py
rename to src/do_gradientai/_utils/_logs.py
index 9047e5c8..ac45b1a5 100644
--- a/src/gradientai/_utils/_logs.py
+++ b/src/do_gradientai/_utils/_logs.py
@@ -1,12 +1,12 @@
import os
import logging
-logger: logging.Logger = logging.getLogger("gradientai")
+logger: logging.Logger = logging.getLogger("do_gradientai")
httpx_logger: logging.Logger = logging.getLogger("httpx")
def _basic_config() -> None:
- # e.g. [2023-10-05 14:12:26 - gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK"
+ # e.g. [2023-10-05 14:12:26 - do_gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK"
logging.basicConfig(
format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
diff --git a/src/gradientai/_utils/_proxy.py b/src/do_gradientai/_utils/_proxy.py
similarity index 100%
rename from src/gradientai/_utils/_proxy.py
rename to src/do_gradientai/_utils/_proxy.py
diff --git a/src/gradientai/_utils/_reflection.py b/src/do_gradientai/_utils/_reflection.py
similarity index 100%
rename from src/gradientai/_utils/_reflection.py
rename to src/do_gradientai/_utils/_reflection.py
diff --git a/src/gradientai/_utils/_resources_proxy.py b/src/do_gradientai/_utils/_resources_proxy.py
similarity index 50%
rename from src/gradientai/_utils/_resources_proxy.py
rename to src/do_gradientai/_utils/_resources_proxy.py
index b3bc4931..03763c3b 100644
--- a/src/gradientai/_utils/_resources_proxy.py
+++ b/src/do_gradientai/_utils/_resources_proxy.py
@@ -7,17 +7,17 @@
class ResourcesProxy(LazyProxy[Any]):
- """A proxy for the `gradientai.resources` module.
+ """A proxy for the `do_gradientai.resources` module.
- This is used so that we can lazily import `gradientai.resources` only when
- needed *and* so that users can just import `gradientai` and reference `gradientai.resources`
+ This is used so that we can lazily import `do_gradientai.resources` only when
+ needed *and* so that users can just import `do_gradientai` and reference `do_gradientai.resources`
"""
@override
def __load__(self) -> Any:
import importlib
- mod = importlib.import_module("gradientai.resources")
+ mod = importlib.import_module("do_gradientai.resources")
return mod
diff --git a/src/gradientai/_utils/_streams.py b/src/do_gradientai/_utils/_streams.py
similarity index 100%
rename from src/gradientai/_utils/_streams.py
rename to src/do_gradientai/_utils/_streams.py
diff --git a/src/gradientai/_utils/_sync.py b/src/do_gradientai/_utils/_sync.py
similarity index 100%
rename from src/gradientai/_utils/_sync.py
rename to src/do_gradientai/_utils/_sync.py
diff --git a/src/gradientai/_utils/_transform.py b/src/do_gradientai/_utils/_transform.py
similarity index 100%
rename from src/gradientai/_utils/_transform.py
rename to src/do_gradientai/_utils/_transform.py
diff --git a/src/gradientai/_utils/_typing.py b/src/do_gradientai/_utils/_typing.py
similarity index 100%
rename from src/gradientai/_utils/_typing.py
rename to src/do_gradientai/_utils/_typing.py
diff --git a/src/gradientai/_utils/_utils.py b/src/do_gradientai/_utils/_utils.py
similarity index 100%
rename from src/gradientai/_utils/_utils.py
rename to src/do_gradientai/_utils/_utils.py
diff --git a/src/gradientai/_version.py b/src/do_gradientai/_version.py
similarity index 83%
rename from src/gradientai/_version.py
rename to src/do_gradientai/_version.py
index 4d3df522..83bf8865 100644
--- a/src/gradientai/_version.py
+++ b/src/do_gradientai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-__title__ = "gradientai"
+__title__ = "do_gradientai"
__version__ = "0.1.0-alpha.4" # x-release-please-version
diff --git a/src/do_gradientai/lib/.keep b/src/do_gradientai/lib/.keep
new file mode 100644
index 00000000..5e2c99fd
--- /dev/null
+++ b/src/do_gradientai/lib/.keep
@@ -0,0 +1,4 @@
+File generated from our OpenAPI spec by Stainless.
+
+This directory can be used to store custom files to expand the SDK.
+It is ignored by Stainless code generation and its content (other than this keep file) won't be touched.
\ No newline at end of file
diff --git a/src/gradientai/py.typed b/src/do_gradientai/py.typed
similarity index 100%
rename from src/gradientai/py.typed
rename to src/do_gradientai/py.typed
diff --git a/src/gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py
similarity index 100%
rename from src/gradientai/resources/__init__.py
rename to src/do_gradientai/resources/__init__.py
diff --git a/src/gradientai/resources/agents/__init__.py b/src/do_gradientai/resources/agents/__init__.py
similarity index 100%
rename from src/gradientai/resources/agents/__init__.py
rename to src/do_gradientai/resources/agents/__init__.py
diff --git a/src/gradientai/resources/agents/agents.py b/src/do_gradientai/resources/agents/agents.py
similarity index 100%
rename from src/gradientai/resources/agents/agents.py
rename to src/do_gradientai/resources/agents/agents.py
diff --git a/src/gradientai/resources/agents/api_keys.py b/src/do_gradientai/resources/agents/api_keys.py
similarity index 100%
rename from src/gradientai/resources/agents/api_keys.py
rename to src/do_gradientai/resources/agents/api_keys.py
diff --git a/src/gradientai/resources/agents/child_agents.py b/src/do_gradientai/resources/agents/child_agents.py
similarity index 100%
rename from src/gradientai/resources/agents/child_agents.py
rename to src/do_gradientai/resources/agents/child_agents.py
diff --git a/src/gradientai/resources/agents/functions.py b/src/do_gradientai/resources/agents/functions.py
similarity index 100%
rename from src/gradientai/resources/agents/functions.py
rename to src/do_gradientai/resources/agents/functions.py
diff --git a/src/gradientai/resources/agents/knowledge_bases.py b/src/do_gradientai/resources/agents/knowledge_bases.py
similarity index 100%
rename from src/gradientai/resources/agents/knowledge_bases.py
rename to src/do_gradientai/resources/agents/knowledge_bases.py
diff --git a/src/gradientai/resources/agents/versions.py b/src/do_gradientai/resources/agents/versions.py
similarity index 100%
rename from src/gradientai/resources/agents/versions.py
rename to src/do_gradientai/resources/agents/versions.py
diff --git a/src/gradientai/resources/chat/__init__.py b/src/do_gradientai/resources/chat/__init__.py
similarity index 100%
rename from src/gradientai/resources/chat/__init__.py
rename to src/do_gradientai/resources/chat/__init__.py
diff --git a/src/gradientai/resources/chat/chat.py b/src/do_gradientai/resources/chat/chat.py
similarity index 100%
rename from src/gradientai/resources/chat/chat.py
rename to src/do_gradientai/resources/chat/chat.py
diff --git a/src/gradientai/resources/chat/completions.py b/src/do_gradientai/resources/chat/completions.py
similarity index 100%
rename from src/gradientai/resources/chat/completions.py
rename to src/do_gradientai/resources/chat/completions.py
diff --git a/src/gradientai/resources/indexing_jobs.py b/src/do_gradientai/resources/indexing_jobs.py
similarity index 100%
rename from src/gradientai/resources/indexing_jobs.py
rename to src/do_gradientai/resources/indexing_jobs.py
diff --git a/src/gradientai/resources/inference/__init__.py b/src/do_gradientai/resources/inference/__init__.py
similarity index 100%
rename from src/gradientai/resources/inference/__init__.py
rename to src/do_gradientai/resources/inference/__init__.py
diff --git a/src/gradientai/resources/inference/api_keys.py b/src/do_gradientai/resources/inference/api_keys.py
similarity index 100%
rename from src/gradientai/resources/inference/api_keys.py
rename to src/do_gradientai/resources/inference/api_keys.py
diff --git a/src/gradientai/resources/inference/inference.py b/src/do_gradientai/resources/inference/inference.py
similarity index 100%
rename from src/gradientai/resources/inference/inference.py
rename to src/do_gradientai/resources/inference/inference.py
diff --git a/src/gradientai/resources/inference/models.py b/src/do_gradientai/resources/inference/models.py
similarity index 100%
rename from src/gradientai/resources/inference/models.py
rename to src/do_gradientai/resources/inference/models.py
diff --git a/src/gradientai/resources/knowledge_bases/__init__.py b/src/do_gradientai/resources/knowledge_bases/__init__.py
similarity index 100%
rename from src/gradientai/resources/knowledge_bases/__init__.py
rename to src/do_gradientai/resources/knowledge_bases/__init__.py
diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/do_gradientai/resources/knowledge_bases/data_sources.py
similarity index 100%
rename from src/gradientai/resources/knowledge_bases/data_sources.py
rename to src/do_gradientai/resources/knowledge_bases/data_sources.py
diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py
similarity index 100%
rename from src/gradientai/resources/knowledge_bases/knowledge_bases.py
rename to src/do_gradientai/resources/knowledge_bases/knowledge_bases.py
diff --git a/src/gradientai/resources/models.py b/src/do_gradientai/resources/models.py
similarity index 100%
rename from src/gradientai/resources/models.py
rename to src/do_gradientai/resources/models.py
diff --git a/src/gradientai/resources/providers/__init__.py b/src/do_gradientai/resources/providers/__init__.py
similarity index 100%
rename from src/gradientai/resources/providers/__init__.py
rename to src/do_gradientai/resources/providers/__init__.py
diff --git a/src/gradientai/resources/providers/anthropic/__init__.py b/src/do_gradientai/resources/providers/anthropic/__init__.py
similarity index 100%
rename from src/gradientai/resources/providers/anthropic/__init__.py
rename to src/do_gradientai/resources/providers/anthropic/__init__.py
diff --git a/src/gradientai/resources/providers/anthropic/anthropic.py b/src/do_gradientai/resources/providers/anthropic/anthropic.py
similarity index 100%
rename from src/gradientai/resources/providers/anthropic/anthropic.py
rename to src/do_gradientai/resources/providers/anthropic/anthropic.py
diff --git a/src/gradientai/resources/providers/anthropic/keys.py b/src/do_gradientai/resources/providers/anthropic/keys.py
similarity index 100%
rename from src/gradientai/resources/providers/anthropic/keys.py
rename to src/do_gradientai/resources/providers/anthropic/keys.py
diff --git a/src/gradientai/resources/providers/openai/__init__.py b/src/do_gradientai/resources/providers/openai/__init__.py
similarity index 100%
rename from src/gradientai/resources/providers/openai/__init__.py
rename to src/do_gradientai/resources/providers/openai/__init__.py
diff --git a/src/gradientai/resources/providers/openai/keys.py b/src/do_gradientai/resources/providers/openai/keys.py
similarity index 100%
rename from src/gradientai/resources/providers/openai/keys.py
rename to src/do_gradientai/resources/providers/openai/keys.py
diff --git a/src/gradientai/resources/providers/openai/openai.py b/src/do_gradientai/resources/providers/openai/openai.py
similarity index 100%
rename from src/gradientai/resources/providers/openai/openai.py
rename to src/do_gradientai/resources/providers/openai/openai.py
diff --git a/src/gradientai/resources/providers/providers.py b/src/do_gradientai/resources/providers/providers.py
similarity index 100%
rename from src/gradientai/resources/providers/providers.py
rename to src/do_gradientai/resources/providers/providers.py
diff --git a/src/gradientai/resources/regions/__init__.py b/src/do_gradientai/resources/regions/__init__.py
similarity index 100%
rename from src/gradientai/resources/regions/__init__.py
rename to src/do_gradientai/resources/regions/__init__.py
diff --git a/src/gradientai/resources/regions/evaluation_datasets.py b/src/do_gradientai/resources/regions/evaluation_datasets.py
similarity index 100%
rename from src/gradientai/resources/regions/evaluation_datasets.py
rename to src/do_gradientai/resources/regions/evaluation_datasets.py
diff --git a/src/gradientai/resources/regions/evaluation_runs/__init__.py b/src/do_gradientai/resources/regions/evaluation_runs/__init__.py
similarity index 100%
rename from src/gradientai/resources/regions/evaluation_runs/__init__.py
rename to src/do_gradientai/resources/regions/evaluation_runs/__init__.py
diff --git a/src/gradientai/resources/regions/evaluation_runs/evaluation_runs.py b/src/do_gradientai/resources/regions/evaluation_runs/evaluation_runs.py
similarity index 100%
rename from src/gradientai/resources/regions/evaluation_runs/evaluation_runs.py
rename to src/do_gradientai/resources/regions/evaluation_runs/evaluation_runs.py
diff --git a/src/gradientai/resources/regions/evaluation_runs/results.py b/src/do_gradientai/resources/regions/evaluation_runs/results.py
similarity index 100%
rename from src/gradientai/resources/regions/evaluation_runs/results.py
rename to src/do_gradientai/resources/regions/evaluation_runs/results.py
diff --git a/src/gradientai/resources/regions/evaluation_test_cases.py b/src/do_gradientai/resources/regions/evaluation_test_cases.py
similarity index 100%
rename from src/gradientai/resources/regions/evaluation_test_cases.py
rename to src/do_gradientai/resources/regions/evaluation_test_cases.py
diff --git a/src/gradientai/resources/regions/regions.py b/src/do_gradientai/resources/regions/regions.py
similarity index 100%
rename from src/gradientai/resources/regions/regions.py
rename to src/do_gradientai/resources/regions/regions.py
diff --git a/src/gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py
similarity index 100%
rename from src/gradientai/types/__init__.py
rename to src/do_gradientai/types/__init__.py
diff --git a/src/gradientai/types/agent_create_params.py b/src/do_gradientai/types/agent_create_params.py
similarity index 100%
rename from src/gradientai/types/agent_create_params.py
rename to src/do_gradientai/types/agent_create_params.py
diff --git a/src/gradientai/types/agent_create_response.py b/src/do_gradientai/types/agent_create_response.py
similarity index 100%
rename from src/gradientai/types/agent_create_response.py
rename to src/do_gradientai/types/agent_create_response.py
diff --git a/src/gradientai/types/agent_delete_response.py b/src/do_gradientai/types/agent_delete_response.py
similarity index 100%
rename from src/gradientai/types/agent_delete_response.py
rename to src/do_gradientai/types/agent_delete_response.py
diff --git a/src/gradientai/types/agent_list_params.py b/src/do_gradientai/types/agent_list_params.py
similarity index 100%
rename from src/gradientai/types/agent_list_params.py
rename to src/do_gradientai/types/agent_list_params.py
diff --git a/src/gradientai/types/agent_list_response.py b/src/do_gradientai/types/agent_list_response.py
similarity index 100%
rename from src/gradientai/types/agent_list_response.py
rename to src/do_gradientai/types/agent_list_response.py
diff --git a/src/gradientai/types/agent_retrieve_response.py b/src/do_gradientai/types/agent_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/agent_retrieve_response.py
rename to src/do_gradientai/types/agent_retrieve_response.py
diff --git a/src/gradientai/types/agent_update_params.py b/src/do_gradientai/types/agent_update_params.py
similarity index 100%
rename from src/gradientai/types/agent_update_params.py
rename to src/do_gradientai/types/agent_update_params.py
diff --git a/src/gradientai/types/agent_update_response.py b/src/do_gradientai/types/agent_update_response.py
similarity index 100%
rename from src/gradientai/types/agent_update_response.py
rename to src/do_gradientai/types/agent_update_response.py
diff --git a/src/gradientai/types/agent_update_status_params.py b/src/do_gradientai/types/agent_update_status_params.py
similarity index 100%
rename from src/gradientai/types/agent_update_status_params.py
rename to src/do_gradientai/types/agent_update_status_params.py
diff --git a/src/gradientai/types/agent_update_status_response.py b/src/do_gradientai/types/agent_update_status_response.py
similarity index 100%
rename from src/gradientai/types/agent_update_status_response.py
rename to src/do_gradientai/types/agent_update_status_response.py
diff --git a/src/gradientai/types/agents/__init__.py b/src/do_gradientai/types/agents/__init__.py
similarity index 100%
rename from src/gradientai/types/agents/__init__.py
rename to src/do_gradientai/types/agents/__init__.py
diff --git a/src/gradientai/types/agents/api_key_create_params.py b/src/do_gradientai/types/agents/api_key_create_params.py
similarity index 100%
rename from src/gradientai/types/agents/api_key_create_params.py
rename to src/do_gradientai/types/agents/api_key_create_params.py
diff --git a/src/gradientai/types/agents/api_key_create_response.py b/src/do_gradientai/types/agents/api_key_create_response.py
similarity index 100%
rename from src/gradientai/types/agents/api_key_create_response.py
rename to src/do_gradientai/types/agents/api_key_create_response.py
diff --git a/src/gradientai/types/agents/api_key_delete_response.py b/src/do_gradientai/types/agents/api_key_delete_response.py
similarity index 100%
rename from src/gradientai/types/agents/api_key_delete_response.py
rename to src/do_gradientai/types/agents/api_key_delete_response.py
diff --git a/src/gradientai/types/agents/api_key_list_params.py b/src/do_gradientai/types/agents/api_key_list_params.py
similarity index 100%
rename from src/gradientai/types/agents/api_key_list_params.py
rename to src/do_gradientai/types/agents/api_key_list_params.py
diff --git a/src/gradientai/types/agents/api_key_list_response.py b/src/do_gradientai/types/agents/api_key_list_response.py
similarity index 100%
rename from src/gradientai/types/agents/api_key_list_response.py
rename to src/do_gradientai/types/agents/api_key_list_response.py
diff --git a/src/gradientai/types/agents/api_key_regenerate_response.py b/src/do_gradientai/types/agents/api_key_regenerate_response.py
similarity index 100%
rename from src/gradientai/types/agents/api_key_regenerate_response.py
rename to src/do_gradientai/types/agents/api_key_regenerate_response.py
diff --git a/src/gradientai/types/agents/api_key_update_params.py b/src/do_gradientai/types/agents/api_key_update_params.py
similarity index 100%
rename from src/gradientai/types/agents/api_key_update_params.py
rename to src/do_gradientai/types/agents/api_key_update_params.py
diff --git a/src/gradientai/types/agents/api_key_update_response.py b/src/do_gradientai/types/agents/api_key_update_response.py
similarity index 100%
rename from src/gradientai/types/agents/api_key_update_response.py
rename to src/do_gradientai/types/agents/api_key_update_response.py
diff --git a/src/gradientai/types/agents/api_link_knowledge_base_output.py b/src/do_gradientai/types/agents/api_link_knowledge_base_output.py
similarity index 100%
rename from src/gradientai/types/agents/api_link_knowledge_base_output.py
rename to src/do_gradientai/types/agents/api_link_knowledge_base_output.py
diff --git a/src/gradientai/types/agents/api_links.py b/src/do_gradientai/types/agents/api_links.py
similarity index 100%
rename from src/gradientai/types/agents/api_links.py
rename to src/do_gradientai/types/agents/api_links.py
diff --git a/src/gradientai/types/agents/api_meta.py b/src/do_gradientai/types/agents/api_meta.py
similarity index 100%
rename from src/gradientai/types/agents/api_meta.py
rename to src/do_gradientai/types/agents/api_meta.py
diff --git a/src/gradientai/types/agents/child_agent_add_params.py b/src/do_gradientai/types/agents/child_agent_add_params.py
similarity index 100%
rename from src/gradientai/types/agents/child_agent_add_params.py
rename to src/do_gradientai/types/agents/child_agent_add_params.py
diff --git a/src/gradientai/types/agents/child_agent_add_response.py b/src/do_gradientai/types/agents/child_agent_add_response.py
similarity index 100%
rename from src/gradientai/types/agents/child_agent_add_response.py
rename to src/do_gradientai/types/agents/child_agent_add_response.py
diff --git a/src/gradientai/types/agents/child_agent_delete_response.py b/src/do_gradientai/types/agents/child_agent_delete_response.py
similarity index 100%
rename from src/gradientai/types/agents/child_agent_delete_response.py
rename to src/do_gradientai/types/agents/child_agent_delete_response.py
diff --git a/src/gradientai/types/agents/child_agent_update_params.py b/src/do_gradientai/types/agents/child_agent_update_params.py
similarity index 100%
rename from src/gradientai/types/agents/child_agent_update_params.py
rename to src/do_gradientai/types/agents/child_agent_update_params.py
diff --git a/src/gradientai/types/agents/child_agent_update_response.py b/src/do_gradientai/types/agents/child_agent_update_response.py
similarity index 100%
rename from src/gradientai/types/agents/child_agent_update_response.py
rename to src/do_gradientai/types/agents/child_agent_update_response.py
diff --git a/src/gradientai/types/agents/child_agent_view_response.py b/src/do_gradientai/types/agents/child_agent_view_response.py
similarity index 100%
rename from src/gradientai/types/agents/child_agent_view_response.py
rename to src/do_gradientai/types/agents/child_agent_view_response.py
diff --git a/src/gradientai/types/agents/function_create_params.py b/src/do_gradientai/types/agents/function_create_params.py
similarity index 100%
rename from src/gradientai/types/agents/function_create_params.py
rename to src/do_gradientai/types/agents/function_create_params.py
diff --git a/src/gradientai/types/agents/function_create_response.py b/src/do_gradientai/types/agents/function_create_response.py
similarity index 100%
rename from src/gradientai/types/agents/function_create_response.py
rename to src/do_gradientai/types/agents/function_create_response.py
diff --git a/src/gradientai/types/agents/function_delete_response.py b/src/do_gradientai/types/agents/function_delete_response.py
similarity index 100%
rename from src/gradientai/types/agents/function_delete_response.py
rename to src/do_gradientai/types/agents/function_delete_response.py
diff --git a/src/gradientai/types/agents/function_update_params.py b/src/do_gradientai/types/agents/function_update_params.py
similarity index 100%
rename from src/gradientai/types/agents/function_update_params.py
rename to src/do_gradientai/types/agents/function_update_params.py
diff --git a/src/gradientai/types/agents/function_update_response.py b/src/do_gradientai/types/agents/function_update_response.py
similarity index 100%
rename from src/gradientai/types/agents/function_update_response.py
rename to src/do_gradientai/types/agents/function_update_response.py
diff --git a/src/gradientai/types/agents/knowledge_base_detach_response.py b/src/do_gradientai/types/agents/knowledge_base_detach_response.py
similarity index 100%
rename from src/gradientai/types/agents/knowledge_base_detach_response.py
rename to src/do_gradientai/types/agents/knowledge_base_detach_response.py
diff --git a/src/gradientai/types/agents/version_list_params.py b/src/do_gradientai/types/agents/version_list_params.py
similarity index 100%
rename from src/gradientai/types/agents/version_list_params.py
rename to src/do_gradientai/types/agents/version_list_params.py
diff --git a/src/gradientai/types/agents/version_list_response.py b/src/do_gradientai/types/agents/version_list_response.py
similarity index 100%
rename from src/gradientai/types/agents/version_list_response.py
rename to src/do_gradientai/types/agents/version_list_response.py
diff --git a/src/gradientai/types/agents/version_update_params.py b/src/do_gradientai/types/agents/version_update_params.py
similarity index 100%
rename from src/gradientai/types/agents/version_update_params.py
rename to src/do_gradientai/types/agents/version_update_params.py
diff --git a/src/gradientai/types/agents/version_update_response.py b/src/do_gradientai/types/agents/version_update_response.py
similarity index 100%
rename from src/gradientai/types/agents/version_update_response.py
rename to src/do_gradientai/types/agents/version_update_response.py
diff --git a/src/gradientai/types/api_agent.py b/src/do_gradientai/types/api_agent.py
similarity index 100%
rename from src/gradientai/types/api_agent.py
rename to src/do_gradientai/types/api_agent.py
diff --git a/src/gradientai/types/api_agent_api_key_info.py b/src/do_gradientai/types/api_agent_api_key_info.py
similarity index 100%
rename from src/gradientai/types/api_agent_api_key_info.py
rename to src/do_gradientai/types/api_agent_api_key_info.py
diff --git a/src/gradientai/types/api_agent_model.py b/src/do_gradientai/types/api_agent_model.py
similarity index 100%
rename from src/gradientai/types/api_agent_model.py
rename to src/do_gradientai/types/api_agent_model.py
diff --git a/src/gradientai/types/api_agreement.py b/src/do_gradientai/types/api_agreement.py
similarity index 100%
rename from src/gradientai/types/api_agreement.py
rename to src/do_gradientai/types/api_agreement.py
diff --git a/src/gradientai/types/api_anthropic_api_key_info.py b/src/do_gradientai/types/api_anthropic_api_key_info.py
similarity index 100%
rename from src/gradientai/types/api_anthropic_api_key_info.py
rename to src/do_gradientai/types/api_anthropic_api_key_info.py
diff --git a/src/gradientai/types/api_deployment_visibility.py b/src/do_gradientai/types/api_deployment_visibility.py
similarity index 100%
rename from src/gradientai/types/api_deployment_visibility.py
rename to src/do_gradientai/types/api_deployment_visibility.py
diff --git a/src/gradientai/types/api_evaluation_metric.py b/src/do_gradientai/types/api_evaluation_metric.py
similarity index 100%
rename from src/gradientai/types/api_evaluation_metric.py
rename to src/do_gradientai/types/api_evaluation_metric.py
diff --git a/src/gradientai/types/api_indexing_job.py b/src/do_gradientai/types/api_indexing_job.py
similarity index 100%
rename from src/gradientai/types/api_indexing_job.py
rename to src/do_gradientai/types/api_indexing_job.py
diff --git a/src/gradientai/types/api_knowledge_base.py b/src/do_gradientai/types/api_knowledge_base.py
similarity index 100%
rename from src/gradientai/types/api_knowledge_base.py
rename to src/do_gradientai/types/api_knowledge_base.py
diff --git a/src/gradientai/types/api_model.py b/src/do_gradientai/types/api_model.py
similarity index 100%
rename from src/gradientai/types/api_model.py
rename to src/do_gradientai/types/api_model.py
diff --git a/src/gradientai/types/api_model_version.py b/src/do_gradientai/types/api_model_version.py
similarity index 100%
rename from src/gradientai/types/api_model_version.py
rename to src/do_gradientai/types/api_model_version.py
diff --git a/src/gradientai/types/api_openai_api_key_info.py b/src/do_gradientai/types/api_openai_api_key_info.py
similarity index 100%
rename from src/gradientai/types/api_openai_api_key_info.py
rename to src/do_gradientai/types/api_openai_api_key_info.py
diff --git a/src/gradientai/types/api_retrieval_method.py b/src/do_gradientai/types/api_retrieval_method.py
similarity index 100%
rename from src/gradientai/types/api_retrieval_method.py
rename to src/do_gradientai/types/api_retrieval_method.py
diff --git a/src/gradientai/types/api_workspace.py b/src/do_gradientai/types/api_workspace.py
similarity index 100%
rename from src/gradientai/types/api_workspace.py
rename to src/do_gradientai/types/api_workspace.py
diff --git a/src/gradientai/types/chat/__init__.py b/src/do_gradientai/types/chat/__init__.py
similarity index 100%
rename from src/gradientai/types/chat/__init__.py
rename to src/do_gradientai/types/chat/__init__.py
diff --git a/src/gradientai/types/chat/chat_completion_token_logprob.py b/src/do_gradientai/types/chat/chat_completion_token_logprob.py
similarity index 100%
rename from src/gradientai/types/chat/chat_completion_token_logprob.py
rename to src/do_gradientai/types/chat/chat_completion_token_logprob.py
diff --git a/src/gradientai/types/chat/completion_create_params.py b/src/do_gradientai/types/chat/completion_create_params.py
similarity index 100%
rename from src/gradientai/types/chat/completion_create_params.py
rename to src/do_gradientai/types/chat/completion_create_params.py
diff --git a/src/gradientai/types/chat/completion_create_response.py b/src/do_gradientai/types/chat/completion_create_response.py
similarity index 100%
rename from src/gradientai/types/chat/completion_create_response.py
rename to src/do_gradientai/types/chat/completion_create_response.py
diff --git a/src/gradientai/types/indexing_job_create_params.py b/src/do_gradientai/types/indexing_job_create_params.py
similarity index 100%
rename from src/gradientai/types/indexing_job_create_params.py
rename to src/do_gradientai/types/indexing_job_create_params.py
diff --git a/src/gradientai/types/indexing_job_create_response.py b/src/do_gradientai/types/indexing_job_create_response.py
similarity index 100%
rename from src/gradientai/types/indexing_job_create_response.py
rename to src/do_gradientai/types/indexing_job_create_response.py
diff --git a/src/gradientai/types/indexing_job_list_params.py b/src/do_gradientai/types/indexing_job_list_params.py
similarity index 100%
rename from src/gradientai/types/indexing_job_list_params.py
rename to src/do_gradientai/types/indexing_job_list_params.py
diff --git a/src/gradientai/types/indexing_job_list_response.py b/src/do_gradientai/types/indexing_job_list_response.py
similarity index 100%
rename from src/gradientai/types/indexing_job_list_response.py
rename to src/do_gradientai/types/indexing_job_list_response.py
diff --git a/src/gradientai/types/indexing_job_retrieve_data_sources_response.py b/src/do_gradientai/types/indexing_job_retrieve_data_sources_response.py
similarity index 100%
rename from src/gradientai/types/indexing_job_retrieve_data_sources_response.py
rename to src/do_gradientai/types/indexing_job_retrieve_data_sources_response.py
diff --git a/src/gradientai/types/indexing_job_retrieve_response.py b/src/do_gradientai/types/indexing_job_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/indexing_job_retrieve_response.py
rename to src/do_gradientai/types/indexing_job_retrieve_response.py
diff --git a/src/gradientai/types/indexing_job_update_cancel_params.py b/src/do_gradientai/types/indexing_job_update_cancel_params.py
similarity index 100%
rename from src/gradientai/types/indexing_job_update_cancel_params.py
rename to src/do_gradientai/types/indexing_job_update_cancel_params.py
diff --git a/src/gradientai/types/indexing_job_update_cancel_response.py b/src/do_gradientai/types/indexing_job_update_cancel_response.py
similarity index 100%
rename from src/gradientai/types/indexing_job_update_cancel_response.py
rename to src/do_gradientai/types/indexing_job_update_cancel_response.py
diff --git a/src/gradientai/types/inference/__init__.py b/src/do_gradientai/types/inference/__init__.py
similarity index 100%
rename from src/gradientai/types/inference/__init__.py
rename to src/do_gradientai/types/inference/__init__.py
diff --git a/src/gradientai/types/inference/api_key_create_params.py b/src/do_gradientai/types/inference/api_key_create_params.py
similarity index 100%
rename from src/gradientai/types/inference/api_key_create_params.py
rename to src/do_gradientai/types/inference/api_key_create_params.py
diff --git a/src/gradientai/types/inference/api_key_create_response.py b/src/do_gradientai/types/inference/api_key_create_response.py
similarity index 100%
rename from src/gradientai/types/inference/api_key_create_response.py
rename to src/do_gradientai/types/inference/api_key_create_response.py
diff --git a/src/gradientai/types/inference/api_key_delete_response.py b/src/do_gradientai/types/inference/api_key_delete_response.py
similarity index 100%
rename from src/gradientai/types/inference/api_key_delete_response.py
rename to src/do_gradientai/types/inference/api_key_delete_response.py
diff --git a/src/gradientai/types/inference/api_key_list_params.py b/src/do_gradientai/types/inference/api_key_list_params.py
similarity index 100%
rename from src/gradientai/types/inference/api_key_list_params.py
rename to src/do_gradientai/types/inference/api_key_list_params.py
diff --git a/src/gradientai/types/inference/api_key_list_response.py b/src/do_gradientai/types/inference/api_key_list_response.py
similarity index 100%
rename from src/gradientai/types/inference/api_key_list_response.py
rename to src/do_gradientai/types/inference/api_key_list_response.py
diff --git a/src/gradientai/types/inference/api_key_update_params.py b/src/do_gradientai/types/inference/api_key_update_params.py
similarity index 100%
rename from src/gradientai/types/inference/api_key_update_params.py
rename to src/do_gradientai/types/inference/api_key_update_params.py
diff --git a/src/gradientai/types/inference/api_key_update_regenerate_response.py b/src/do_gradientai/types/inference/api_key_update_regenerate_response.py
similarity index 100%
rename from src/gradientai/types/inference/api_key_update_regenerate_response.py
rename to src/do_gradientai/types/inference/api_key_update_regenerate_response.py
diff --git a/src/gradientai/types/inference/api_key_update_response.py b/src/do_gradientai/types/inference/api_key_update_response.py
similarity index 100%
rename from src/gradientai/types/inference/api_key_update_response.py
rename to src/do_gradientai/types/inference/api_key_update_response.py
diff --git a/src/gradientai/types/inference/api_model_api_key_info.py b/src/do_gradientai/types/inference/api_model_api_key_info.py
similarity index 100%
rename from src/gradientai/types/inference/api_model_api_key_info.py
rename to src/do_gradientai/types/inference/api_model_api_key_info.py
diff --git a/src/gradientai/types/inference/model.py b/src/do_gradientai/types/inference/model.py
similarity index 100%
rename from src/gradientai/types/inference/model.py
rename to src/do_gradientai/types/inference/model.py
diff --git a/src/gradientai/types/inference/model_list_response.py b/src/do_gradientai/types/inference/model_list_response.py
similarity index 100%
rename from src/gradientai/types/inference/model_list_response.py
rename to src/do_gradientai/types/inference/model_list_response.py
diff --git a/src/gradientai/types/knowledge_base_create_params.py b/src/do_gradientai/types/knowledge_base_create_params.py
similarity index 100%
rename from src/gradientai/types/knowledge_base_create_params.py
rename to src/do_gradientai/types/knowledge_base_create_params.py
diff --git a/src/gradientai/types/knowledge_base_create_response.py b/src/do_gradientai/types/knowledge_base_create_response.py
similarity index 100%
rename from src/gradientai/types/knowledge_base_create_response.py
rename to src/do_gradientai/types/knowledge_base_create_response.py
diff --git a/src/gradientai/types/knowledge_base_delete_response.py b/src/do_gradientai/types/knowledge_base_delete_response.py
similarity index 100%
rename from src/gradientai/types/knowledge_base_delete_response.py
rename to src/do_gradientai/types/knowledge_base_delete_response.py
diff --git a/src/gradientai/types/knowledge_base_list_params.py b/src/do_gradientai/types/knowledge_base_list_params.py
similarity index 100%
rename from src/gradientai/types/knowledge_base_list_params.py
rename to src/do_gradientai/types/knowledge_base_list_params.py
diff --git a/src/gradientai/types/knowledge_base_list_response.py b/src/do_gradientai/types/knowledge_base_list_response.py
similarity index 100%
rename from src/gradientai/types/knowledge_base_list_response.py
rename to src/do_gradientai/types/knowledge_base_list_response.py
diff --git a/src/gradientai/types/knowledge_base_retrieve_response.py b/src/do_gradientai/types/knowledge_base_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/knowledge_base_retrieve_response.py
rename to src/do_gradientai/types/knowledge_base_retrieve_response.py
diff --git a/src/gradientai/types/knowledge_base_update_params.py b/src/do_gradientai/types/knowledge_base_update_params.py
similarity index 100%
rename from src/gradientai/types/knowledge_base_update_params.py
rename to src/do_gradientai/types/knowledge_base_update_params.py
diff --git a/src/gradientai/types/knowledge_base_update_response.py b/src/do_gradientai/types/knowledge_base_update_response.py
similarity index 100%
rename from src/gradientai/types/knowledge_base_update_response.py
rename to src/do_gradientai/types/knowledge_base_update_response.py
diff --git a/src/gradientai/types/knowledge_bases/__init__.py b/src/do_gradientai/types/knowledge_bases/__init__.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/__init__.py
rename to src/do_gradientai/types/knowledge_bases/__init__.py
diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py b/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/api_file_upload_data_source.py
rename to src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py
diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py b/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py
rename to src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py
diff --git a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
rename to src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source.py b/src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/api_spaces_data_source.py
rename to src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py
diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py b/src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py
rename to src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py
diff --git a/src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py b/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py
rename to src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py
diff --git a/src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py b/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py
rename to src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py
diff --git a/src/gradientai/types/knowledge_bases/aws_data_source_param.py b/src/do_gradientai/types/knowledge_bases/aws_data_source_param.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/aws_data_source_param.py
rename to src/do_gradientai/types/knowledge_bases/aws_data_source_param.py
diff --git a/src/gradientai/types/knowledge_bases/data_source_create_params.py b/src/do_gradientai/types/knowledge_bases/data_source_create_params.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/data_source_create_params.py
rename to src/do_gradientai/types/knowledge_bases/data_source_create_params.py
diff --git a/src/gradientai/types/knowledge_bases/data_source_create_response.py b/src/do_gradientai/types/knowledge_bases/data_source_create_response.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/data_source_create_response.py
rename to src/do_gradientai/types/knowledge_bases/data_source_create_response.py
diff --git a/src/gradientai/types/knowledge_bases/data_source_delete_response.py b/src/do_gradientai/types/knowledge_bases/data_source_delete_response.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/data_source_delete_response.py
rename to src/do_gradientai/types/knowledge_bases/data_source_delete_response.py
diff --git a/src/gradientai/types/knowledge_bases/data_source_list_params.py b/src/do_gradientai/types/knowledge_bases/data_source_list_params.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/data_source_list_params.py
rename to src/do_gradientai/types/knowledge_bases/data_source_list_params.py
diff --git a/src/gradientai/types/knowledge_bases/data_source_list_response.py b/src/do_gradientai/types/knowledge_bases/data_source_list_response.py
similarity index 100%
rename from src/gradientai/types/knowledge_bases/data_source_list_response.py
rename to src/do_gradientai/types/knowledge_bases/data_source_list_response.py
diff --git a/src/gradientai/types/model_list_params.py b/src/do_gradientai/types/model_list_params.py
similarity index 100%
rename from src/gradientai/types/model_list_params.py
rename to src/do_gradientai/types/model_list_params.py
diff --git a/src/gradientai/types/model_list_response.py b/src/do_gradientai/types/model_list_response.py
similarity index 100%
rename from src/gradientai/types/model_list_response.py
rename to src/do_gradientai/types/model_list_response.py
diff --git a/src/gradientai/types/providers/__init__.py b/src/do_gradientai/types/providers/__init__.py
similarity index 100%
rename from src/gradientai/types/providers/__init__.py
rename to src/do_gradientai/types/providers/__init__.py
diff --git a/src/gradientai/types/providers/anthropic/__init__.py b/src/do_gradientai/types/providers/anthropic/__init__.py
similarity index 100%
rename from src/gradientai/types/providers/anthropic/__init__.py
rename to src/do_gradientai/types/providers/anthropic/__init__.py
diff --git a/src/gradientai/types/providers/anthropic/key_create_params.py b/src/do_gradientai/types/providers/anthropic/key_create_params.py
similarity index 100%
rename from src/gradientai/types/providers/anthropic/key_create_params.py
rename to src/do_gradientai/types/providers/anthropic/key_create_params.py
diff --git a/src/gradientai/types/providers/anthropic/key_create_response.py b/src/do_gradientai/types/providers/anthropic/key_create_response.py
similarity index 100%
rename from src/gradientai/types/providers/anthropic/key_create_response.py
rename to src/do_gradientai/types/providers/anthropic/key_create_response.py
diff --git a/src/gradientai/types/providers/anthropic/key_delete_response.py b/src/do_gradientai/types/providers/anthropic/key_delete_response.py
similarity index 100%
rename from src/gradientai/types/providers/anthropic/key_delete_response.py
rename to src/do_gradientai/types/providers/anthropic/key_delete_response.py
diff --git a/src/gradientai/types/providers/anthropic/key_list_agents_params.py b/src/do_gradientai/types/providers/anthropic/key_list_agents_params.py
similarity index 100%
rename from src/gradientai/types/providers/anthropic/key_list_agents_params.py
rename to src/do_gradientai/types/providers/anthropic/key_list_agents_params.py
diff --git a/src/gradientai/types/providers/anthropic/key_list_agents_response.py b/src/do_gradientai/types/providers/anthropic/key_list_agents_response.py
similarity index 100%
rename from src/gradientai/types/providers/anthropic/key_list_agents_response.py
rename to src/do_gradientai/types/providers/anthropic/key_list_agents_response.py
diff --git a/src/gradientai/types/providers/anthropic/key_list_params.py b/src/do_gradientai/types/providers/anthropic/key_list_params.py
similarity index 100%
rename from src/gradientai/types/providers/anthropic/key_list_params.py
rename to src/do_gradientai/types/providers/anthropic/key_list_params.py
diff --git a/src/gradientai/types/providers/anthropic/key_list_response.py b/src/do_gradientai/types/providers/anthropic/key_list_response.py
similarity index 100%
rename from src/gradientai/types/providers/anthropic/key_list_response.py
rename to src/do_gradientai/types/providers/anthropic/key_list_response.py
diff --git a/src/gradientai/types/providers/anthropic/key_retrieve_response.py b/src/do_gradientai/types/providers/anthropic/key_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/providers/anthropic/key_retrieve_response.py
rename to src/do_gradientai/types/providers/anthropic/key_retrieve_response.py
diff --git a/src/gradientai/types/providers/anthropic/key_update_params.py b/src/do_gradientai/types/providers/anthropic/key_update_params.py
similarity index 100%
rename from src/gradientai/types/providers/anthropic/key_update_params.py
rename to src/do_gradientai/types/providers/anthropic/key_update_params.py
diff --git a/src/gradientai/types/providers/anthropic/key_update_response.py b/src/do_gradientai/types/providers/anthropic/key_update_response.py
similarity index 100%
rename from src/gradientai/types/providers/anthropic/key_update_response.py
rename to src/do_gradientai/types/providers/anthropic/key_update_response.py
diff --git a/src/gradientai/types/providers/openai/__init__.py b/src/do_gradientai/types/providers/openai/__init__.py
similarity index 100%
rename from src/gradientai/types/providers/openai/__init__.py
rename to src/do_gradientai/types/providers/openai/__init__.py
diff --git a/src/gradientai/types/providers/openai/key_create_params.py b/src/do_gradientai/types/providers/openai/key_create_params.py
similarity index 100%
rename from src/gradientai/types/providers/openai/key_create_params.py
rename to src/do_gradientai/types/providers/openai/key_create_params.py
diff --git a/src/gradientai/types/providers/openai/key_create_response.py b/src/do_gradientai/types/providers/openai/key_create_response.py
similarity index 100%
rename from src/gradientai/types/providers/openai/key_create_response.py
rename to src/do_gradientai/types/providers/openai/key_create_response.py
diff --git a/src/gradientai/types/providers/openai/key_delete_response.py b/src/do_gradientai/types/providers/openai/key_delete_response.py
similarity index 100%
rename from src/gradientai/types/providers/openai/key_delete_response.py
rename to src/do_gradientai/types/providers/openai/key_delete_response.py
diff --git a/src/gradientai/types/providers/openai/key_list_params.py b/src/do_gradientai/types/providers/openai/key_list_params.py
similarity index 100%
rename from src/gradientai/types/providers/openai/key_list_params.py
rename to src/do_gradientai/types/providers/openai/key_list_params.py
diff --git a/src/gradientai/types/providers/openai/key_list_response.py b/src/do_gradientai/types/providers/openai/key_list_response.py
similarity index 100%
rename from src/gradientai/types/providers/openai/key_list_response.py
rename to src/do_gradientai/types/providers/openai/key_list_response.py
diff --git a/src/gradientai/types/providers/openai/key_retrieve_agents_params.py b/src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py
similarity index 100%
rename from src/gradientai/types/providers/openai/key_retrieve_agents_params.py
rename to src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py
diff --git a/src/gradientai/types/providers/openai/key_retrieve_agents_response.py b/src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py
similarity index 100%
rename from src/gradientai/types/providers/openai/key_retrieve_agents_response.py
rename to src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py
diff --git a/src/gradientai/types/providers/openai/key_retrieve_response.py b/src/do_gradientai/types/providers/openai/key_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/providers/openai/key_retrieve_response.py
rename to src/do_gradientai/types/providers/openai/key_retrieve_response.py
diff --git a/src/gradientai/types/providers/openai/key_update_params.py b/src/do_gradientai/types/providers/openai/key_update_params.py
similarity index 100%
rename from src/gradientai/types/providers/openai/key_update_params.py
rename to src/do_gradientai/types/providers/openai/key_update_params.py
diff --git a/src/gradientai/types/providers/openai/key_update_response.py b/src/do_gradientai/types/providers/openai/key_update_response.py
similarity index 100%
rename from src/gradientai/types/providers/openai/key_update_response.py
rename to src/do_gradientai/types/providers/openai/key_update_response.py
diff --git a/src/gradientai/types/region_list_evaluation_metrics_response.py b/src/do_gradientai/types/region_list_evaluation_metrics_response.py
similarity index 100%
rename from src/gradientai/types/region_list_evaluation_metrics_response.py
rename to src/do_gradientai/types/region_list_evaluation_metrics_response.py
diff --git a/src/gradientai/types/region_list_params.py b/src/do_gradientai/types/region_list_params.py
similarity index 100%
rename from src/gradientai/types/region_list_params.py
rename to src/do_gradientai/types/region_list_params.py
diff --git a/src/gradientai/types/region_list_response.py b/src/do_gradientai/types/region_list_response.py
similarity index 100%
rename from src/gradientai/types/region_list_response.py
rename to src/do_gradientai/types/region_list_response.py
diff --git a/src/gradientai/types/regions/__init__.py b/src/do_gradientai/types/regions/__init__.py
similarity index 100%
rename from src/gradientai/types/regions/__init__.py
rename to src/do_gradientai/types/regions/__init__.py
diff --git a/src/gradientai/types/regions/api_evaluation_test_case.py b/src/do_gradientai/types/regions/api_evaluation_test_case.py
similarity index 100%
rename from src/gradientai/types/regions/api_evaluation_test_case.py
rename to src/do_gradientai/types/regions/api_evaluation_test_case.py
diff --git a/src/gradientai/types/regions/api_star_metric.py b/src/do_gradientai/types/regions/api_star_metric.py
similarity index 100%
rename from src/gradientai/types/regions/api_star_metric.py
rename to src/do_gradientai/types/regions/api_star_metric.py
diff --git a/src/gradientai/types/regions/api_star_metric_param.py b/src/do_gradientai/types/regions/api_star_metric_param.py
similarity index 100%
rename from src/gradientai/types/regions/api_star_metric_param.py
rename to src/do_gradientai/types/regions/api_star_metric_param.py
diff --git a/src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/do_gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py
rename to src/do_gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py
diff --git a/src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py b/src/do_gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py
rename to src/do_gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py
diff --git a/src/gradientai/types/regions/evaluation_dataset_create_params.py b/src/do_gradientai/types/regions/evaluation_dataset_create_params.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_dataset_create_params.py
rename to src/do_gradientai/types/regions/evaluation_dataset_create_params.py
diff --git a/src/gradientai/types/regions/evaluation_dataset_create_response.py b/src/do_gradientai/types/regions/evaluation_dataset_create_response.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_dataset_create_response.py
rename to src/do_gradientai/types/regions/evaluation_dataset_create_response.py
diff --git a/src/gradientai/types/regions/evaluation_run_create_params.py b/src/do_gradientai/types/regions/evaluation_run_create_params.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_run_create_params.py
rename to src/do_gradientai/types/regions/evaluation_run_create_params.py
diff --git a/src/gradientai/types/regions/evaluation_run_create_response.py b/src/do_gradientai/types/regions/evaluation_run_create_response.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_run_create_response.py
rename to src/do_gradientai/types/regions/evaluation_run_create_response.py
diff --git a/src/gradientai/types/regions/evaluation_run_retrieve_response.py b/src/do_gradientai/types/regions/evaluation_run_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_run_retrieve_response.py
rename to src/do_gradientai/types/regions/evaluation_run_retrieve_response.py
diff --git a/src/gradientai/types/regions/evaluation_runs/__init__.py b/src/do_gradientai/types/regions/evaluation_runs/__init__.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_runs/__init__.py
rename to src/do_gradientai/types/regions/evaluation_runs/__init__.py
diff --git a/src/gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py b/src/do_gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py
rename to src/do_gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py
diff --git a/src/gradientai/types/regions/evaluation_runs/api_evaluation_run.py b/src/do_gradientai/types/regions/evaluation_runs/api_evaluation_run.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_runs/api_evaluation_run.py
rename to src/do_gradientai/types/regions/evaluation_runs/api_evaluation_run.py
diff --git a/src/gradientai/types/regions/evaluation_runs/api_prompt.py b/src/do_gradientai/types/regions/evaluation_runs/api_prompt.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_runs/api_prompt.py
rename to src/do_gradientai/types/regions/evaluation_runs/api_prompt.py
diff --git a/src/gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py b/src/do_gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py
rename to src/do_gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py
diff --git a/src/gradientai/types/regions/evaluation_runs/result_retrieve_response.py b/src/do_gradientai/types/regions/evaluation_runs/result_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_runs/result_retrieve_response.py
rename to src/do_gradientai/types/regions/evaluation_runs/result_retrieve_response.py
diff --git a/src/gradientai/types/regions/evaluation_test_case_create_params.py b/src/do_gradientai/types/regions/evaluation_test_case_create_params.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_test_case_create_params.py
rename to src/do_gradientai/types/regions/evaluation_test_case_create_params.py
diff --git a/src/gradientai/types/regions/evaluation_test_case_create_response.py b/src/do_gradientai/types/regions/evaluation_test_case_create_response.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_test_case_create_response.py
rename to src/do_gradientai/types/regions/evaluation_test_case_create_response.py
diff --git a/src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py b/src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py
rename to src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py
diff --git a/src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py b/src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py
rename to src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py
diff --git a/src/gradientai/types/regions/evaluation_test_case_list_response.py b/src/do_gradientai/types/regions/evaluation_test_case_list_response.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_test_case_list_response.py
rename to src/do_gradientai/types/regions/evaluation_test_case_list_response.py
diff --git a/src/gradientai/types/regions/evaluation_test_case_retrieve_response.py b/src/do_gradientai/types/regions/evaluation_test_case_retrieve_response.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_test_case_retrieve_response.py
rename to src/do_gradientai/types/regions/evaluation_test_case_retrieve_response.py
diff --git a/src/gradientai/types/regions/evaluation_test_case_update_params.py b/src/do_gradientai/types/regions/evaluation_test_case_update_params.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_test_case_update_params.py
rename to src/do_gradientai/types/regions/evaluation_test_case_update_params.py
diff --git a/src/gradientai/types/regions/evaluation_test_case_update_response.py b/src/do_gradientai/types/regions/evaluation_test_case_update_response.py
similarity index 100%
rename from src/gradientai/types/regions/evaluation_test_case_update_response.py
rename to src/do_gradientai/types/regions/evaluation_test_case_update_response.py
diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py
index beb9666a..65351922 100644
--- a/tests/api_resources/agents/test_api_keys.py
+++ b/tests/api_resources/agents/test_api_keys.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
APIKeyListResponse,
APIKeyCreateResponse,
APIKeyDeleteResponse,
diff --git a/tests/api_resources/agents/test_child_agents.py b/tests/api_resources/agents/test_child_agents.py
index daa7b10e..c5108463 100644
--- a/tests/api_resources/agents/test_child_agents.py
+++ b/tests/api_resources/agents/test_child_agents.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
ChildAgentAddResponse,
ChildAgentViewResponse,
ChildAgentDeleteResponse,
diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py
index 5a3693cb..2c5ceaf7 100644
--- a/tests/api_resources/agents/test_functions.py
+++ b/tests/api_resources/agents/test_functions.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
FunctionCreateResponse,
FunctionDeleteResponse,
FunctionUpdateResponse,
diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py
index e62c05ff..0a007840 100644
--- a/tests/api_resources/agents/test_knowledge_bases.py
+++ b/tests/api_resources/agents/test_knowledge_bases.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py
index 79f73672..314cd2e2 100644
--- a/tests/api_resources/agents/test_versions.py
+++ b/tests/api_resources/agents/test_versions.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types.agents import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import (
VersionListResponse,
VersionUpdateResponse,
)
diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py
index b4c09579..62f24534 100644
--- a/tests/api_resources/chat/test_completions.py
+++ b/tests/api_resources/chat/test_completions.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types.chat import CompletionCreateResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.chat import CompletionCreateResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/inference/test_api_keys.py b/tests/api_resources/inference/test_api_keys.py
index 90bf95b9..c48a5420 100644
--- a/tests/api_resources/inference/test_api_keys.py
+++ b/tests/api_resources/inference/test_api_keys.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types.inference import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.inference import (
APIKeyListResponse,
APIKeyCreateResponse,
APIKeyDeleteResponse,
diff --git a/tests/api_resources/inference/test_models.py b/tests/api_resources/inference/test_models.py
index 569345ed..e930d83f 100644
--- a/tests/api_resources/inference/test_models.py
+++ b/tests/api_resources/inference/test_models.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types.inference import Model, ModelListResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.inference import Model, ModelListResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py
index 9c466e2f..15665a84 100644
--- a/tests/api_resources/knowledge_bases/test_data_sources.py
+++ b/tests/api_resources/knowledge_bases/test_data_sources.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types.knowledge_bases import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.knowledge_bases import (
DataSourceListResponse,
DataSourceCreateResponse,
DataSourceDeleteResponse,
diff --git a/tests/api_resources/providers/anthropic/test_keys.py b/tests/api_resources/providers/anthropic/test_keys.py
index 86ec19f4..7aa595f7 100644
--- a/tests/api_resources/providers/anthropic/test_keys.py
+++ b/tests/api_resources/providers/anthropic/test_keys.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types.providers.anthropic import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.providers.anthropic import (
KeyListResponse,
KeyCreateResponse,
KeyDeleteResponse,
diff --git a/tests/api_resources/providers/openai/test_keys.py b/tests/api_resources/providers/openai/test_keys.py
index ce5cb4f5..714dc4bd 100644
--- a/tests/api_resources/providers/openai/test_keys.py
+++ b/tests/api_resources/providers/openai/test_keys.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types.providers.openai import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.providers.openai import (
KeyListResponse,
KeyCreateResponse,
KeyDeleteResponse,
diff --git a/tests/api_resources/regions/evaluation_runs/test_results.py b/tests/api_resources/regions/evaluation_runs/test_results.py
index 29deb8b2..e4b906bd 100644
--- a/tests/api_resources/regions/evaluation_runs/test_results.py
+++ b/tests/api_resources/regions/evaluation_runs/test_results.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types.regions.evaluation_runs import ResultRetrieveResponse, ResultRetrievePromptResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.regions.evaluation_runs import ResultRetrieveResponse, ResultRetrievePromptResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/regions/test_evaluation_datasets.py b/tests/api_resources/regions/test_evaluation_datasets.py
index 3e3da0fe..6e7a5e52 100644
--- a/tests/api_resources/regions/test_evaluation_datasets.py
+++ b/tests/api_resources/regions/test_evaluation_datasets.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types.regions import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.regions import (
EvaluationDatasetCreateResponse,
EvaluationDatasetCreateFileUploadPresignedURLsResponse,
)
diff --git a/tests/api_resources/regions/test_evaluation_runs.py b/tests/api_resources/regions/test_evaluation_runs.py
index b2d3c634..09bf8525 100644
--- a/tests/api_resources/regions/test_evaluation_runs.py
+++ b/tests/api_resources/regions/test_evaluation_runs.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types.regions import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.regions import (
EvaluationRunCreateResponse,
EvaluationRunRetrieveResponse,
)
diff --git a/tests/api_resources/regions/test_evaluation_test_cases.py b/tests/api_resources/regions/test_evaluation_test_cases.py
index a01ace90..7cc18835 100644
--- a/tests/api_resources/regions/test_evaluation_test_cases.py
+++ b/tests/api_resources/regions/test_evaluation_test_cases.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types.regions import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.regions import (
EvaluationTestCaseListResponse,
EvaluationTestCaseCreateResponse,
EvaluationTestCaseUpdateResponse,
diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py
index 2cc0e080..74c8cdab 100644
--- a/tests/api_resources/test_agents.py
+++ b/tests/api_resources/test_agents.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types import (
AgentListResponse,
AgentCreateResponse,
AgentDeleteResponse,
diff --git a/tests/api_resources/test_indexing_jobs.py b/tests/api_resources/test_indexing_jobs.py
index 6a50d9b5..41ba0f8c 100644
--- a/tests/api_resources/test_indexing_jobs.py
+++ b/tests/api_resources/test_indexing_jobs.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types import (
IndexingJobListResponse,
IndexingJobCreateResponse,
IndexingJobRetrieveResponse,
diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py
index 508820ce..2132cd50 100644
--- a/tests/api_resources/test_knowledge_bases.py
+++ b/tests/api_resources/test_knowledge_bases.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types import (
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types import (
KnowledgeBaseListResponse,
KnowledgeBaseCreateResponse,
KnowledgeBaseDeleteResponse,
diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py
index 5e119f71..f7e21015 100644
--- a/tests/api_resources/test_models.py
+++ b/tests/api_resources/test_models.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types import ModelListResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types import ModelListResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py
index 9cb24b0a..bf51ef96 100644
--- a/tests/api_resources/test_regions.py
+++ b/tests/api_resources/test_regions.py
@@ -7,9 +7,9 @@
import pytest
-from gradientai import GradientAI, AsyncGradientAI
from tests.utils import assert_matches_type
-from gradientai.types import RegionListResponse, RegionListEvaluationMetricsResponse
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types import RegionListResponse, RegionListEvaluationMetricsResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
diff --git a/tests/conftest.py b/tests/conftest.py
index 23079a7e..daa5b955 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -10,15 +10,15 @@
import pytest
from pytest_asyncio import is_async_test
-from gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient
-from gradientai._utils import is_dict
+from do_gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient
+from do_gradientai._utils import is_dict
if TYPE_CHECKING:
from _pytest.fixtures import FixtureRequest # pyright: ignore[reportPrivateImportUsage]
pytest.register_assert_rewrite("tests.utils")
-logging.getLogger("gradientai").setLevel(logging.DEBUG)
+logging.getLogger("do_gradientai").setLevel(logging.DEBUG)
# automatically add `pytest.mark.asyncio()` to all of our async tests
diff --git a/tests/test_client.py b/tests/test_client.py
index f19a5edb..4d20a597 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -21,11 +21,11 @@
from respx import MockRouter
from pydantic import ValidationError
-from gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError
-from gradientai._types import Omit
-from gradientai._models import BaseModel, FinalRequestOptions
-from gradientai._exceptions import APIStatusError, APITimeoutError, GradientAIError, APIResponseValidationError
-from gradientai._base_client import (
+from do_gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError
+from do_gradientai._types import Omit
+from do_gradientai._models import BaseModel, FinalRequestOptions
+from do_gradientai._exceptions import APIStatusError, APITimeoutError, GradientAIError, APIResponseValidationError
+from do_gradientai._base_client import (
DEFAULT_TIMEOUT,
HTTPX_DEFAULT_TIMEOUT,
BaseClient,
@@ -231,10 +231,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic
# to_raw_response_wrapper leaks through the @functools.wraps() decorator.
#
# removing the decorator fixes the leak for reasons we don't understand.
- "gradientai/_legacy_response.py",
- "gradientai/_response.py",
+ "do_gradientai/_legacy_response.py",
+ "do_gradientai/_response.py",
# pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason.
- "gradientai/_compat.py",
+ "do_gradientai/_compat.py",
# Standard library leaks we don't care about.
"/logging/__init__.py",
]
@@ -718,7 +718,7 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str
calculated = client._calculate_retry_timeout(remaining_retries, options, headers)
assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType]
- @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None:
respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(side_effect=httpx.TimeoutException("Test timeout error"))
@@ -728,7 +728,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien
assert _get_open_connections(self.client) == 0
- @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None:
respx_mock.get("/v2/gen-ai/agents/uuid/versions").mock(return_value=httpx.Response(500))
@@ -738,7 +738,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client
assert _get_open_connections(self.client) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
@pytest.mark.parametrize("failure_mode", ["status", "exception"])
def test_retries_taken(
@@ -769,7 +769,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
def test_omit_retry_count_header(
self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter
@@ -794,7 +794,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
def test_overwrite_retry_count_header(
self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter
@@ -1043,10 +1043,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic
# to_raw_response_wrapper leaks through the @functools.wraps() decorator.
#
# removing the decorator fixes the leak for reasons we don't understand.
- "gradientai/_legacy_response.py",
- "gradientai/_response.py",
+ "do_gradientai/_legacy_response.py",
+ "do_gradientai/_response.py",
# pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason.
- "gradientai/_compat.py",
+ "do_gradientai/_compat.py",
# Standard library leaks we don't care about.
"/logging/__init__.py",
]
@@ -1536,7 +1536,7 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte
calculated = client._calculate_retry_timeout(remaining_retries, options, headers)
assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType]
- @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
async def test_retrying_timeout_errors_doesnt_leak(
self, respx_mock: MockRouter, async_client: AsyncGradientAI
@@ -1548,7 +1548,7 @@ async def test_retrying_timeout_errors_doesnt_leak(
assert _get_open_connections(self.client) == 0
- @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
async def test_retrying_status_errors_doesnt_leak(
self, respx_mock: MockRouter, async_client: AsyncGradientAI
@@ -1560,7 +1560,7 @@ async def test_retrying_status_errors_doesnt_leak(
assert _get_open_connections(self.client) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
@pytest.mark.asyncio
@pytest.mark.parametrize("failure_mode", ["status", "exception"])
@@ -1592,7 +1592,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
@pytest.mark.asyncio
async def test_omit_retry_count_header(
@@ -1618,7 +1618,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
@pytest.mark.asyncio
async def test_overwrite_retry_count_header(
@@ -1654,8 +1654,8 @@ def test_get_platform(self) -> None:
import nest_asyncio
import threading
- from gradientai._utils import asyncify
- from gradientai._base_client import get_platform
+ from do_gradientai._utils import asyncify
+ from do_gradientai._base_client import get_platform
async def test_main() -> None:
result = await asyncify(get_platform)()
diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py
index 9d1579a8..5a98ce1b 100644
--- a/tests/test_deepcopy.py
+++ b/tests/test_deepcopy.py
@@ -1,4 +1,4 @@
-from gradientai._utils import deepcopy_minimal
+from do_gradientai._utils import deepcopy_minimal
def assert_different_identities(obj1: object, obj2: object) -> None:
diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py
index 2905d59c..341e65ae 100644
--- a/tests/test_extract_files.py
+++ b/tests/test_extract_files.py
@@ -4,8 +4,8 @@
import pytest
-from gradientai._types import FileTypes
-from gradientai._utils import extract_files
+from do_gradientai._types import FileTypes
+from do_gradientai._utils import extract_files
def test_removes_files_from_input() -> None:
diff --git a/tests/test_files.py b/tests/test_files.py
index 4a723313..ff7914bb 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -4,7 +4,7 @@
import pytest
from dirty_equals import IsDict, IsList, IsBytes, IsTuple
-from gradientai._files import to_httpx_files, async_to_httpx_files
+from do_gradientai._files import to_httpx_files, async_to_httpx_files
readme_path = Path(__file__).parent.parent.joinpath("README.md")
diff --git a/tests/test_models.py b/tests/test_models.py
index 28aff1f3..575dc3af 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -7,9 +7,9 @@
import pydantic
from pydantic import Field
-from gradientai._utils import PropertyInfo
-from gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json
-from gradientai._models import BaseModel, construct_type
+from do_gradientai._utils import PropertyInfo
+from do_gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json
+from do_gradientai._models import BaseModel, construct_type
class BasicModel(BaseModel):
diff --git a/tests/test_qs.py b/tests/test_qs.py
index 9080377b..c9213571 100644
--- a/tests/test_qs.py
+++ b/tests/test_qs.py
@@ -4,7 +4,7 @@
import pytest
-from gradientai._qs import Querystring, stringify
+from do_gradientai._qs import Querystring, stringify
def test_empty() -> None:
diff --git a/tests/test_required_args.py b/tests/test_required_args.py
index c4e6b9d8..434e9491 100644
--- a/tests/test_required_args.py
+++ b/tests/test_required_args.py
@@ -2,7 +2,7 @@
import pytest
-from gradientai._utils import required_args
+from do_gradientai._utils import required_args
def test_too_many_positional_params() -> None:
diff --git a/tests/test_response.py b/tests/test_response.py
index 1a8f241e..001ce776 100644
--- a/tests/test_response.py
+++ b/tests/test_response.py
@@ -6,8 +6,8 @@
import pytest
import pydantic
-from gradientai import BaseModel, GradientAI, AsyncGradientAI
-from gradientai._response import (
+from do_gradientai import BaseModel, GradientAI, AsyncGradientAI
+from do_gradientai._response import (
APIResponse,
BaseAPIResponse,
AsyncAPIResponse,
@@ -15,8 +15,8 @@
AsyncBinaryAPIResponse,
extract_response_type,
)
-from gradientai._streaming import Stream
-from gradientai._base_client import FinalRequestOptions
+from do_gradientai._streaming import Stream
+from do_gradientai._base_client import FinalRequestOptions
class ConcreteBaseAPIResponse(APIResponse[bytes]): ...
@@ -37,7 +37,7 @@ def test_extract_response_type_direct_classes() -> None:
def test_extract_response_type_direct_class_missing_type_arg() -> None:
with pytest.raises(
RuntimeError,
- match="Expected type to have a type argument at index 0 but it did not",
+ match="Expected type to have a type argument at index 0 but it did not",
):
extract_response_type(AsyncAPIResponse)
@@ -68,7 +68,7 @@ def test_response_parse_mismatched_basemodel(client: GradientAI) -> None:
with pytest.raises(
TypeError,
- match="Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`",
+ match="Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`",
):
response.parse(to=PydanticModel)
@@ -86,7 +86,7 @@ async def test_async_response_parse_mismatched_basemodel(async_client: AsyncGrad
with pytest.raises(
TypeError,
- match="Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`",
+ match="Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`",
):
await response.parse(to=PydanticModel)
diff --git a/tests/test_streaming.py b/tests/test_streaming.py
index cdb41a77..c1ce8e85 100644
--- a/tests/test_streaming.py
+++ b/tests/test_streaming.py
@@ -5,8 +5,8 @@
import httpx
import pytest
-from gradientai import GradientAI, AsyncGradientAI
-from gradientai._streaming import Stream, AsyncStream, ServerSentEvent
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai._streaming import Stream, AsyncStream, ServerSentEvent
@pytest.mark.asyncio
diff --git a/tests/test_transform.py b/tests/test_transform.py
index 825fe048..30c06d6a 100644
--- a/tests/test_transform.py
+++ b/tests/test_transform.py
@@ -8,15 +8,15 @@
import pytest
-from gradientai._types import NOT_GIVEN, Base64FileInput
-from gradientai._utils import (
+from do_gradientai._types import NOT_GIVEN, Base64FileInput
+from do_gradientai._utils import (
PropertyInfo,
transform as _transform,
parse_datetime,
async_transform as _async_transform,
)
-from gradientai._compat import PYDANTIC_V2
-from gradientai._models import BaseModel
+from do_gradientai._compat import PYDANTIC_V2
+from do_gradientai._models import BaseModel
_T = TypeVar("_T")
diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py
index 3856b2c9..9ce2e0d3 100644
--- a/tests/test_utils/test_proxy.py
+++ b/tests/test_utils/test_proxy.py
@@ -2,7 +2,7 @@
from typing import Any
from typing_extensions import override
-from gradientai._utils import LazyProxy
+from do_gradientai._utils import LazyProxy
class RecursiveLazyProxy(LazyProxy[Any]):
diff --git a/tests/test_utils/test_typing.py b/tests/test_utils/test_typing.py
index 66ad064f..c9129fdc 100644
--- a/tests/test_utils/test_typing.py
+++ b/tests/test_utils/test_typing.py
@@ -2,7 +2,7 @@
from typing import Generic, TypeVar, cast
-from gradientai._utils import extract_type_var_from_base
+from do_gradientai._utils import extract_type_var_from_base
_T = TypeVar("_T")
_T2 = TypeVar("_T2")
diff --git a/tests/utils.py b/tests/utils.py
index b539ed2c..9def7c60 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -8,8 +8,8 @@
from datetime import date, datetime
from typing_extensions import Literal, get_args, get_origin, assert_type
-from gradientai._types import Omit, NoneType
-from gradientai._utils import (
+from do_gradientai._types import Omit, NoneType
+from do_gradientai._utils import (
is_dict,
is_list,
is_list_type,
@@ -18,8 +18,8 @@
is_annotated_type,
is_type_alias_type,
)
-from gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields
-from gradientai._models import BaseModel
+from do_gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields
+from do_gradientai._models import BaseModel
BaseModelT = TypeVar("BaseModelT", bound=BaseModel)
From 58d7319ce68c639c2151a3e96a5d522ec06ff96f Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 02:34:55 +0000
Subject: [PATCH 02/21] chore(internal): codegen related update
---
tests/test_client.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/tests/test_client.py b/tests/test_client.py
index 4d20a597..920275ae 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -191,6 +191,7 @@ def test_copy_signature(self) -> None:
copy_param = copy_signature.parameters.get(name)
assert copy_param is not None, f"copy() signature is missing the {name} param"
+ @pytest.mark.skipif(sys.version_info >= (3, 12), reason="fails because of a memory leak that started from 3.12")
def test_copy_build_request(self) -> None:
options = FinalRequestOptions(method="get", url="/foo")
@@ -1003,6 +1004,7 @@ def test_copy_signature(self) -> None:
copy_param = copy_signature.parameters.get(name)
assert copy_param is not None, f"copy() signature is missing the {name} param"
+ @pytest.mark.skipif(sys.version_info >= (3, 12), reason="fails because of a memory leak that started from 3.12")
def test_copy_build_request(self) -> None:
options = FinalRequestOptions(method="get", url="/foo")
From bd1b98953e9e8d15466e161c1d7ce98d88dc88e7 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 02:35:17 +0000
Subject: [PATCH 03/21] codegen metadata
---
.stats.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.stats.yml b/.stats.yml
index ed791f90..be3d4054 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 70
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: 0c94579072c21854f9e042dfaac74e1d
+config_hash: 72d372e69afa63549cdb9df236ac0cbf
From 891d6b32e5bdb07d23abf898cec17a60ee64f99d Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 02:58:23 +0000
Subject: [PATCH 04/21] feat(api): update via SDK Studio
---
.stats.yml | 4 +-
README.md | 2 +-
api.md | 155 ++++----
src/do_gradientai/_client.py | 2 +-
.../resources/agents/__init__.py | 56 +++
src/do_gradientai/resources/agents/agents.py | 128 +++++++
.../evaluation_datasets.py | 6 +-
.../resources/agents/evaluation_metrics.py | 145 ++++++++
.../evaluation_runs.py | 139 +++++--
.../evaluation_test_cases.py | 14 +-
src/do_gradientai/resources/regions.py | 195 ++++++++++
.../resources/regions/__init__.py | 61 ---
.../regions/evaluation_runs/__init__.py | 33 --
.../regions/evaluation_runs/results.py | 264 -------------
.../resources/regions/regions.py | 352 ------------------
src/do_gradientai/types/__init__.py | 4 -
src/do_gradientai/types/agents/__init__.py | 34 ++
.../{ => agents}/api_evaluation_metric.py | 2 +-
.../api_evaluation_metric_result.py | 2 +-
.../api_evaluation_prompt.py} | 6 +-
.../api_evaluation_run.py | 2 +-
.../api_evaluation_test_case.py | 2 +-
.../{regions => agents}/api_star_metric.py | 0
.../api_star_metric_param.py | 0
...reate_file_upload_presigned_urls_params.py | 0
...ate_file_upload_presigned_urls_response.py | 0
.../evaluation_dataset_create_params.py | 0
.../evaluation_dataset_create_response.py | 0
.../evaluation_metric_list_response.py} | 6 +-
.../evaluation_run_create_params.py | 0
.../evaluation_run_create_response.py | 0
.../evaluation_run_list_results_response.py} | 10 +-
.../evaluation_run_retrieve_response.py | 2 +-
.../evaluation_test_case_create_params.py | 0
.../evaluation_test_case_create_response.py | 0
...n_test_case_list_evaluation_runs_params.py | 0
...test_case_list_evaluation_runs_response.py | 2 +-
.../evaluation_test_case_list_response.py | 0
.../evaluation_test_case_retrieve_response.py | 0
.../evaluation_test_case_update_params.py | 0
.../evaluation_test_case_update_response.py | 0
src/do_gradientai/types/api_workspace.py | 2 +-
src/do_gradientai/types/regions/__init__.py | 32 --
.../types/regions/evaluation_runs/__init__.py | 9 -
.../result_retrieve_prompt_response.py | 12 -
.../test_evaluation_datasets.py | 34 +-
.../agents/test_evaluation_metrics.py | 80 ++++
.../test_evaluation_runs.py | 119 +++++-
.../test_evaluation_test_cases.py | 86 ++---
tests/api_resources/regions/__init__.py | 1 -
.../regions/evaluation_runs/__init__.py | 1 -
.../regions/evaluation_runs/test_results.py | 200 ----------
tests/api_resources/test_regions.py | 58 +--
53 files changed, 1007 insertions(+), 1255 deletions(-)
rename src/do_gradientai/resources/{regions => agents}/evaluation_datasets.py (98%)
create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics.py
rename src/do_gradientai/resources/{regions/evaluation_runs => agents}/evaluation_runs.py (70%)
rename src/do_gradientai/resources/{regions => agents}/evaluation_test_cases.py (97%)
create mode 100644 src/do_gradientai/resources/regions.py
delete mode 100644 src/do_gradientai/resources/regions/__init__.py
delete mode 100644 src/do_gradientai/resources/regions/evaluation_runs/__init__.py
delete mode 100644 src/do_gradientai/resources/regions/evaluation_runs/results.py
delete mode 100644 src/do_gradientai/resources/regions/regions.py
rename src/do_gradientai/types/{ => agents}/api_evaluation_metric.py (95%)
rename src/do_gradientai/types/{regions/evaluation_runs => agents}/api_evaluation_metric_result.py (92%)
rename src/do_gradientai/types/{regions/evaluation_runs/api_prompt.py => agents/api_evaluation_prompt.py} (90%)
rename src/do_gradientai/types/{regions/evaluation_runs => agents}/api_evaluation_run.py (97%)
rename src/do_gradientai/types/{regions => agents}/api_evaluation_test_case.py (94%)
rename src/do_gradientai/types/{regions => agents}/api_star_metric.py (100%)
rename src/do_gradientai/types/{regions => agents}/api_star_metric_param.py (100%)
rename src/do_gradientai/types/{regions => agents}/evaluation_dataset_create_file_upload_presigned_urls_params.py (100%)
rename src/do_gradientai/types/{regions => agents}/evaluation_dataset_create_file_upload_presigned_urls_response.py (100%)
rename src/do_gradientai/types/{regions => agents}/evaluation_dataset_create_params.py (100%)
rename src/do_gradientai/types/{regions => agents}/evaluation_dataset_create_response.py (100%)
rename src/do_gradientai/types/{region_list_evaluation_metrics_response.py => agents/evaluation_metric_list_response.py} (63%)
rename src/do_gradientai/types/{regions => agents}/evaluation_run_create_params.py (100%)
rename src/do_gradientai/types/{regions => agents}/evaluation_run_create_response.py (100%)
rename src/do_gradientai/types/{regions/evaluation_runs/result_retrieve_response.py => agents/evaluation_run_list_results_response.py} (52%)
rename src/do_gradientai/types/{regions => agents}/evaluation_run_retrieve_response.py (82%)
rename src/do_gradientai/types/{regions => agents}/evaluation_test_case_create_params.py (100%)
rename src/do_gradientai/types/{regions => agents}/evaluation_test_case_create_response.py (100%)
rename src/do_gradientai/types/{regions => agents}/evaluation_test_case_list_evaluation_runs_params.py (100%)
rename src/do_gradientai/types/{regions => agents}/evaluation_test_case_list_evaluation_runs_response.py (85%)
rename src/do_gradientai/types/{regions => agents}/evaluation_test_case_list_response.py (100%)
rename src/do_gradientai/types/{regions => agents}/evaluation_test_case_retrieve_response.py (100%)
rename src/do_gradientai/types/{regions => agents}/evaluation_test_case_update_params.py (100%)
rename src/do_gradientai/types/{regions => agents}/evaluation_test_case_update_response.py (100%)
delete mode 100644 src/do_gradientai/types/regions/__init__.py
delete mode 100644 src/do_gradientai/types/regions/evaluation_runs/__init__.py
delete mode 100644 src/do_gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py
rename tests/api_resources/{regions => agents}/test_evaluation_datasets.py (82%)
create mode 100644 tests/api_resources/agents/test_evaluation_metrics.py
rename tests/api_resources/{regions => agents}/test_evaluation_runs.py (56%)
rename tests/api_resources/{regions => agents}/test_evaluation_test_cases.py (82%)
delete mode 100644 tests/api_resources/regions/__init__.py
delete mode 100644 tests/api_resources/regions/evaluation_runs/__init__.py
delete mode 100644 tests/api_resources/regions/evaluation_runs/test_results.py
diff --git a/.stats.yml b/.stats.yml
index be3d4054..49720dd2 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 70
+configured_endpoints: 69
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: 72d372e69afa63549cdb9df236ac0cbf
+config_hash: 190bed33fe275347e4871077b32af63f
diff --git a/README.md b/README.md
index 15ee41ab..e7715d74 100644
--- a/README.md
+++ b/README.md
@@ -120,7 +120,7 @@ from do_gradientai import GradientAI
client = GradientAI()
-evaluation_test_case = client.regions.evaluation_test_cases.create(
+evaluation_test_case = client.agents.evaluation_test_cases.create(
star_metric={},
)
print(evaluation_test_case.star_metric)
diff --git a/api.md b/api.md
index a10c03ef..018742d7 100644
--- a/api.md
+++ b/api.md
@@ -52,6 +52,80 @@ Methods:
- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse
- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse
+## EvaluationMetrics
+
+Types:
+
+```python
+from do_gradientai.types.agents import EvaluationMetricListResponse
+```
+
+Methods:
+
+- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse
+
+## EvaluationRuns
+
+Types:
+
+```python
+from do_gradientai.types.agents import (
+ APIEvaluationMetric,
+ APIEvaluationMetricResult,
+ APIEvaluationPrompt,
+ APIEvaluationRun,
+ EvaluationRunCreateResponse,
+ EvaluationRunRetrieveResponse,
+ EvaluationRunListResultsResponse,
+)
+```
+
+Methods:
+
+- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse
+- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse
+- client.agents.evaluation_runs.list_results(evaluation_run_uuid) -> EvaluationRunListResultsResponse
+
+## EvaluationTestCases
+
+Types:
+
+```python
+from do_gradientai.types.agents import (
+ APIEvaluationTestCase,
+ APIStarMetric,
+ EvaluationTestCaseCreateResponse,
+ EvaluationTestCaseRetrieveResponse,
+ EvaluationTestCaseUpdateResponse,
+ EvaluationTestCaseListResponse,
+ EvaluationTestCaseListEvaluationRunsResponse,
+)
+```
+
+Methods:
+
+- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse
+- client.agents.evaluation_test_cases.retrieve(test_case_uuid) -> EvaluationTestCaseRetrieveResponse
+- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse
+- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse
+- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse
+
+## EvaluationDatasets
+
+Types:
+
+```python
+from do_gradientai.types.agents import (
+ EvaluationDatasetCreateResponse,
+ EvaluationDatasetCreateFileUploadPresignedURLsResponse,
+)
+```
+
+Methods:
+
+- client.agents.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse
+- client.agents.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse
+
## Functions
Types:
@@ -176,89 +250,12 @@ Methods:
Types:
```python
-from do_gradientai.types import (
- APIEvaluationMetric,
- RegionListResponse,
- RegionListEvaluationMetricsResponse,
-)
-```
-
-Methods:
-
-- client.regions.list(\*\*params) -> RegionListResponse
-- client.regions.list_evaluation_metrics() -> RegionListEvaluationMetricsResponse
-
-## EvaluationRuns
-
-Types:
-
-```python
-from do_gradientai.types.regions import EvaluationRunCreateResponse, EvaluationRunRetrieveResponse
-```
-
-Methods:
-
-- client.regions.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse
-- client.regions.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse
-
-### Results
-
-Types:
-
-```python
-from do_gradientai.types.regions.evaluation_runs import (
- APIEvaluationMetricResult,
- APIEvaluationRun,
- APIPrompt,
- ResultRetrieveResponse,
- ResultRetrievePromptResponse,
-)
-```
-
-Methods:
-
-- client.regions.evaluation_runs.results.retrieve(evaluation_run_uuid) -> ResultRetrieveResponse
-- client.regions.evaluation_runs.results.retrieve_prompt(prompt_id, \*, evaluation_run_uuid) -> ResultRetrievePromptResponse
-
-## EvaluationTestCases
-
-Types:
-
-```python
-from do_gradientai.types.regions import (
- APIEvaluationTestCase,
- APIStarMetric,
- EvaluationTestCaseCreateResponse,
- EvaluationTestCaseRetrieveResponse,
- EvaluationTestCaseUpdateResponse,
- EvaluationTestCaseListResponse,
- EvaluationTestCaseListEvaluationRunsResponse,
-)
-```
-
-Methods:
-
-- client.regions.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse
-- client.regions.evaluation_test_cases.retrieve(test_case_uuid) -> EvaluationTestCaseRetrieveResponse
-- client.regions.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse
-- client.regions.evaluation_test_cases.list() -> EvaluationTestCaseListResponse
-- client.regions.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse
-
-## EvaluationDatasets
-
-Types:
-
-```python
-from do_gradientai.types.regions import (
- EvaluationDatasetCreateResponse,
- EvaluationDatasetCreateFileUploadPresignedURLsResponse,
-)
+from do_gradientai.types import RegionListResponse
```
Methods:
-- client.regions.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse
-- client.regions.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse
+- client.regions.list(\*\*params) -> RegionListResponse
# IndexingJobs
diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py
index 71db35bc..8710fe68 100644
--- a/src/do_gradientai/_client.py
+++ b/src/do_gradientai/_client.py
@@ -33,10 +33,10 @@
if TYPE_CHECKING:
from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases
from .resources.models import ModelsResource, AsyncModelsResource
+ from .resources.regions import RegionsResource, AsyncRegionsResource
from .resources.chat.chat import ChatResource, AsyncChatResource
from .resources.agents.agents import AgentsResource, AsyncAgentsResource
from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource
- from .resources.regions.regions import RegionsResource, AsyncRegionsResource
from .resources.inference.inference import InferenceResource, AsyncInferenceResource
from .resources.providers.providers import ProvidersResource, AsyncProvidersResource
from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource
diff --git a/src/do_gradientai/resources/agents/__init__.py b/src/do_gradientai/resources/agents/__init__.py
index f41a0408..3eb9cde8 100644
--- a/src/do_gradientai/resources/agents/__init__.py
+++ b/src/do_gradientai/resources/agents/__init__.py
@@ -40,6 +40,14 @@
ChildAgentsResourceWithStreamingResponse,
AsyncChildAgentsResourceWithStreamingResponse,
)
+from .evaluation_runs import (
+ EvaluationRunsResource,
+ AsyncEvaluationRunsResource,
+ EvaluationRunsResourceWithRawResponse,
+ AsyncEvaluationRunsResourceWithRawResponse,
+ EvaluationRunsResourceWithStreamingResponse,
+ AsyncEvaluationRunsResourceWithStreamingResponse,
+)
from .knowledge_bases import (
KnowledgeBasesResource,
AsyncKnowledgeBasesResource,
@@ -48,6 +56,30 @@
KnowledgeBasesResourceWithStreamingResponse,
AsyncKnowledgeBasesResourceWithStreamingResponse,
)
+from .evaluation_metrics import (
+ EvaluationMetricsResource,
+ AsyncEvaluationMetricsResource,
+ EvaluationMetricsResourceWithRawResponse,
+ AsyncEvaluationMetricsResourceWithRawResponse,
+ EvaluationMetricsResourceWithStreamingResponse,
+ AsyncEvaluationMetricsResourceWithStreamingResponse,
+)
+from .evaluation_datasets import (
+ EvaluationDatasetsResource,
+ AsyncEvaluationDatasetsResource,
+ EvaluationDatasetsResourceWithRawResponse,
+ AsyncEvaluationDatasetsResourceWithRawResponse,
+ EvaluationDatasetsResourceWithStreamingResponse,
+ AsyncEvaluationDatasetsResourceWithStreamingResponse,
+)
+from .evaluation_test_cases import (
+ EvaluationTestCasesResource,
+ AsyncEvaluationTestCasesResource,
+ EvaluationTestCasesResourceWithRawResponse,
+ AsyncEvaluationTestCasesResourceWithRawResponse,
+ EvaluationTestCasesResourceWithStreamingResponse,
+ AsyncEvaluationTestCasesResourceWithStreamingResponse,
+)
__all__ = [
"APIKeysResource",
@@ -56,6 +88,30 @@
"AsyncAPIKeysResourceWithRawResponse",
"APIKeysResourceWithStreamingResponse",
"AsyncAPIKeysResourceWithStreamingResponse",
+ "EvaluationMetricsResource",
+ "AsyncEvaluationMetricsResource",
+ "EvaluationMetricsResourceWithRawResponse",
+ "AsyncEvaluationMetricsResourceWithRawResponse",
+ "EvaluationMetricsResourceWithStreamingResponse",
+ "AsyncEvaluationMetricsResourceWithStreamingResponse",
+ "EvaluationRunsResource",
+ "AsyncEvaluationRunsResource",
+ "EvaluationRunsResourceWithRawResponse",
+ "AsyncEvaluationRunsResourceWithRawResponse",
+ "EvaluationRunsResourceWithStreamingResponse",
+ "AsyncEvaluationRunsResourceWithStreamingResponse",
+ "EvaluationTestCasesResource",
+ "AsyncEvaluationTestCasesResource",
+ "EvaluationTestCasesResourceWithRawResponse",
+ "AsyncEvaluationTestCasesResourceWithRawResponse",
+ "EvaluationTestCasesResourceWithStreamingResponse",
+ "AsyncEvaluationTestCasesResourceWithStreamingResponse",
+ "EvaluationDatasetsResource",
+ "AsyncEvaluationDatasetsResource",
+ "EvaluationDatasetsResourceWithRawResponse",
+ "AsyncEvaluationDatasetsResourceWithRawResponse",
+ "EvaluationDatasetsResourceWithStreamingResponse",
+ "AsyncEvaluationDatasetsResourceWithStreamingResponse",
"FunctionsResource",
"AsyncFunctionsResource",
"FunctionsResourceWithRawResponse",
diff --git a/src/do_gradientai/resources/agents/agents.py b/src/do_gradientai/resources/agents/agents.py
index 63f0c4d4..6bb39894 100644
--- a/src/do_gradientai/resources/agents/agents.py
+++ b/src/do_gradientai/resources/agents/agents.py
@@ -57,6 +57,14 @@
AsyncChildAgentsResourceWithStreamingResponse,
)
from ..._base_client import make_request_options
+from .evaluation_runs import (
+ EvaluationRunsResource,
+ AsyncEvaluationRunsResource,
+ EvaluationRunsResourceWithRawResponse,
+ AsyncEvaluationRunsResourceWithRawResponse,
+ EvaluationRunsResourceWithStreamingResponse,
+ AsyncEvaluationRunsResourceWithStreamingResponse,
+)
from .knowledge_bases import (
KnowledgeBasesResource,
AsyncKnowledgeBasesResource,
@@ -65,6 +73,30 @@
KnowledgeBasesResourceWithStreamingResponse,
AsyncKnowledgeBasesResourceWithStreamingResponse,
)
+from .evaluation_metrics import (
+ EvaluationMetricsResource,
+ AsyncEvaluationMetricsResource,
+ EvaluationMetricsResourceWithRawResponse,
+ AsyncEvaluationMetricsResourceWithRawResponse,
+ EvaluationMetricsResourceWithStreamingResponse,
+ AsyncEvaluationMetricsResourceWithStreamingResponse,
+)
+from .evaluation_datasets import (
+ EvaluationDatasetsResource,
+ AsyncEvaluationDatasetsResource,
+ EvaluationDatasetsResourceWithRawResponse,
+ AsyncEvaluationDatasetsResourceWithRawResponse,
+ EvaluationDatasetsResourceWithStreamingResponse,
+ AsyncEvaluationDatasetsResourceWithStreamingResponse,
+)
+from .evaluation_test_cases import (
+ EvaluationTestCasesResource,
+ AsyncEvaluationTestCasesResource,
+ EvaluationTestCasesResourceWithRawResponse,
+ AsyncEvaluationTestCasesResourceWithRawResponse,
+ EvaluationTestCasesResourceWithStreamingResponse,
+ AsyncEvaluationTestCasesResourceWithStreamingResponse,
+)
from ...types.agent_list_response import AgentListResponse
from ...types.api_retrieval_method import APIRetrievalMethod
from ...types.agent_create_response import AgentCreateResponse
@@ -82,6 +114,22 @@ class AgentsResource(SyncAPIResource):
def api_keys(self) -> APIKeysResource:
return APIKeysResource(self._client)
+ @cached_property
+ def evaluation_metrics(self) -> EvaluationMetricsResource:
+ return EvaluationMetricsResource(self._client)
+
+ @cached_property
+ def evaluation_runs(self) -> EvaluationRunsResource:
+ return EvaluationRunsResource(self._client)
+
+ @cached_property
+ def evaluation_test_cases(self) -> EvaluationTestCasesResource:
+ return EvaluationTestCasesResource(self._client)
+
+ @cached_property
+ def evaluation_datasets(self) -> EvaluationDatasetsResource:
+ return EvaluationDatasetsResource(self._client)
+
@cached_property
def functions(self) -> FunctionsResource:
return FunctionsResource(self._client)
@@ -450,6 +498,22 @@ class AsyncAgentsResource(AsyncAPIResource):
def api_keys(self) -> AsyncAPIKeysResource:
return AsyncAPIKeysResource(self._client)
+ @cached_property
+ def evaluation_metrics(self) -> AsyncEvaluationMetricsResource:
+ return AsyncEvaluationMetricsResource(self._client)
+
+ @cached_property
+ def evaluation_runs(self) -> AsyncEvaluationRunsResource:
+ return AsyncEvaluationRunsResource(self._client)
+
+ @cached_property
+ def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResource:
+ return AsyncEvaluationTestCasesResource(self._client)
+
+ @cached_property
+ def evaluation_datasets(self) -> AsyncEvaluationDatasetsResource:
+ return AsyncEvaluationDatasetsResource(self._client)
+
@cached_property
def functions(self) -> AsyncFunctionsResource:
return AsyncFunctionsResource(self._client)
@@ -840,6 +904,22 @@ def __init__(self, agents: AgentsResource) -> None:
def api_keys(self) -> APIKeysResourceWithRawResponse:
return APIKeysResourceWithRawResponse(self._agents.api_keys)
+ @cached_property
+ def evaluation_metrics(self) -> EvaluationMetricsResourceWithRawResponse:
+ return EvaluationMetricsResourceWithRawResponse(self._agents.evaluation_metrics)
+
+ @cached_property
+ def evaluation_runs(self) -> EvaluationRunsResourceWithRawResponse:
+ return EvaluationRunsResourceWithRawResponse(self._agents.evaluation_runs)
+
+ @cached_property
+ def evaluation_test_cases(self) -> EvaluationTestCasesResourceWithRawResponse:
+ return EvaluationTestCasesResourceWithRawResponse(self._agents.evaluation_test_cases)
+
+ @cached_property
+ def evaluation_datasets(self) -> EvaluationDatasetsResourceWithRawResponse:
+ return EvaluationDatasetsResourceWithRawResponse(self._agents.evaluation_datasets)
+
@cached_property
def functions(self) -> FunctionsResourceWithRawResponse:
return FunctionsResourceWithRawResponse(self._agents.functions)
@@ -884,6 +964,22 @@ def __init__(self, agents: AsyncAgentsResource) -> None:
def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse:
return AsyncAPIKeysResourceWithRawResponse(self._agents.api_keys)
+ @cached_property
+ def evaluation_metrics(self) -> AsyncEvaluationMetricsResourceWithRawResponse:
+ return AsyncEvaluationMetricsResourceWithRawResponse(self._agents.evaluation_metrics)
+
+ @cached_property
+ def evaluation_runs(self) -> AsyncEvaluationRunsResourceWithRawResponse:
+ return AsyncEvaluationRunsResourceWithRawResponse(self._agents.evaluation_runs)
+
+ @cached_property
+ def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResourceWithRawResponse:
+ return AsyncEvaluationTestCasesResourceWithRawResponse(self._agents.evaluation_test_cases)
+
+ @cached_property
+ def evaluation_datasets(self) -> AsyncEvaluationDatasetsResourceWithRawResponse:
+ return AsyncEvaluationDatasetsResourceWithRawResponse(self._agents.evaluation_datasets)
+
@cached_property
def functions(self) -> AsyncFunctionsResourceWithRawResponse:
return AsyncFunctionsResourceWithRawResponse(self._agents.functions)
@@ -928,6 +1024,22 @@ def __init__(self, agents: AgentsResource) -> None:
def api_keys(self) -> APIKeysResourceWithStreamingResponse:
return APIKeysResourceWithStreamingResponse(self._agents.api_keys)
+ @cached_property
+ def evaluation_metrics(self) -> EvaluationMetricsResourceWithStreamingResponse:
+ return EvaluationMetricsResourceWithStreamingResponse(self._agents.evaluation_metrics)
+
+ @cached_property
+ def evaluation_runs(self) -> EvaluationRunsResourceWithStreamingResponse:
+ return EvaluationRunsResourceWithStreamingResponse(self._agents.evaluation_runs)
+
+ @cached_property
+ def evaluation_test_cases(self) -> EvaluationTestCasesResourceWithStreamingResponse:
+ return EvaluationTestCasesResourceWithStreamingResponse(self._agents.evaluation_test_cases)
+
+ @cached_property
+ def evaluation_datasets(self) -> EvaluationDatasetsResourceWithStreamingResponse:
+ return EvaluationDatasetsResourceWithStreamingResponse(self._agents.evaluation_datasets)
+
@cached_property
def functions(self) -> FunctionsResourceWithStreamingResponse:
return FunctionsResourceWithStreamingResponse(self._agents.functions)
@@ -972,6 +1084,22 @@ def __init__(self, agents: AsyncAgentsResource) -> None:
def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse:
return AsyncAPIKeysResourceWithStreamingResponse(self._agents.api_keys)
+ @cached_property
+ def evaluation_metrics(self) -> AsyncEvaluationMetricsResourceWithStreamingResponse:
+ return AsyncEvaluationMetricsResourceWithStreamingResponse(self._agents.evaluation_metrics)
+
+ @cached_property
+ def evaluation_runs(self) -> AsyncEvaluationRunsResourceWithStreamingResponse:
+ return AsyncEvaluationRunsResourceWithStreamingResponse(self._agents.evaluation_runs)
+
+ @cached_property
+ def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResourceWithStreamingResponse:
+ return AsyncEvaluationTestCasesResourceWithStreamingResponse(self._agents.evaluation_test_cases)
+
+ @cached_property
+ def evaluation_datasets(self) -> AsyncEvaluationDatasetsResourceWithStreamingResponse:
+ return AsyncEvaluationDatasetsResourceWithStreamingResponse(self._agents.evaluation_datasets)
+
@cached_property
def functions(self) -> AsyncFunctionsResourceWithStreamingResponse:
return AsyncFunctionsResourceWithStreamingResponse(self._agents.functions)
diff --git a/src/do_gradientai/resources/regions/evaluation_datasets.py b/src/do_gradientai/resources/agents/evaluation_datasets.py
similarity index 98%
rename from src/do_gradientai/resources/regions/evaluation_datasets.py
rename to src/do_gradientai/resources/agents/evaluation_datasets.py
index f82e9701..42eca703 100644
--- a/src/do_gradientai/resources/regions/evaluation_datasets.py
+++ b/src/do_gradientai/resources/agents/evaluation_datasets.py
@@ -17,13 +17,13 @@
async_to_streamed_response_wrapper,
)
from ..._base_client import make_request_options
-from ...types.regions import (
+from ...types.agents import (
evaluation_dataset_create_params,
evaluation_dataset_create_file_upload_presigned_urls_params,
)
-from ...types.regions.evaluation_dataset_create_response import EvaluationDatasetCreateResponse
+from ...types.agents.evaluation_dataset_create_response import EvaluationDatasetCreateResponse
from ...types.knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam
-from ...types.regions.evaluation_dataset_create_file_upload_presigned_urls_response import (
+from ...types.agents.evaluation_dataset_create_file_upload_presigned_urls_response import (
EvaluationDatasetCreateFileUploadPresignedURLsResponse,
)
diff --git a/src/do_gradientai/resources/agents/evaluation_metrics.py b/src/do_gradientai/resources/agents/evaluation_metrics.py
new file mode 100644
index 00000000..c554be3e
--- /dev/null
+++ b/src/do_gradientai/resources/agents/evaluation_metrics.py
@@ -0,0 +1,145 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.agents.evaluation_metric_list_response import EvaluationMetricListResponse
+
+__all__ = ["EvaluationMetricsResource", "AsyncEvaluationMetricsResource"]
+
+
+class EvaluationMetricsResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> EvaluationMetricsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return EvaluationMetricsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> EvaluationMetricsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return EvaluationMetricsResourceWithStreamingResponse(self)
+
+ def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> EvaluationMetricListResponse:
+ """
+ To list all evaluation metrics, send a GET request to
+ `/v2/gen-ai/evaluation_metrics`.
+ """
+ return self._get(
+ "/v2/gen-ai/evaluation_metrics"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/evaluation_metrics",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationMetricListResponse,
+ )
+
+
+class AsyncEvaluationMetricsResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncEvaluationMetricsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncEvaluationMetricsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncEvaluationMetricsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncEvaluationMetricsResourceWithStreamingResponse(self)
+
+ async def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> EvaluationMetricListResponse:
+ """
+ To list all evaluation metrics, send a GET request to
+ `/v2/gen-ai/evaluation_metrics`.
+ """
+ return await self._get(
+ "/v2/gen-ai/evaluation_metrics"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/evaluation_metrics",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationMetricListResponse,
+ )
+
+
+class EvaluationMetricsResourceWithRawResponse:
+ def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None:
+ self._evaluation_metrics = evaluation_metrics
+
+ self.list = to_raw_response_wrapper(
+ evaluation_metrics.list,
+ )
+
+
+class AsyncEvaluationMetricsResourceWithRawResponse:
+ def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None:
+ self._evaluation_metrics = evaluation_metrics
+
+ self.list = async_to_raw_response_wrapper(
+ evaluation_metrics.list,
+ )
+
+
+class EvaluationMetricsResourceWithStreamingResponse:
+ def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None:
+ self._evaluation_metrics = evaluation_metrics
+
+ self.list = to_streamed_response_wrapper(
+ evaluation_metrics.list,
+ )
+
+
+class AsyncEvaluationMetricsResourceWithStreamingResponse:
+ def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None:
+ self._evaluation_metrics = evaluation_metrics
+
+ self.list = async_to_streamed_response_wrapper(
+ evaluation_metrics.list,
+ )
diff --git a/src/do_gradientai/resources/regions/evaluation_runs/evaluation_runs.py b/src/do_gradientai/resources/agents/evaluation_runs.py
similarity index 70%
rename from src/do_gradientai/resources/regions/evaluation_runs/evaluation_runs.py
rename to src/do_gradientai/resources/agents/evaluation_runs.py
index 9221c45c..7e207e7d 100644
--- a/src/do_gradientai/resources/regions/evaluation_runs/evaluation_runs.py
+++ b/src/do_gradientai/resources/agents/evaluation_runs.py
@@ -4,37 +4,26 @@
import httpx
-from .results import (
- ResultsResource,
- AsyncResultsResource,
- ResultsResourceWithRawResponse,
- AsyncResultsResourceWithRawResponse,
- ResultsResourceWithStreamingResponse,
- AsyncResultsResourceWithStreamingResponse,
-)
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
to_raw_response_wrapper,
to_streamed_response_wrapper,
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
-from ...._base_client import make_request_options
-from ....types.regions import evaluation_run_create_params
-from ....types.regions.evaluation_run_create_response import EvaluationRunCreateResponse
-from ....types.regions.evaluation_run_retrieve_response import EvaluationRunRetrieveResponse
+from ..._base_client import make_request_options
+from ...types.agents import evaluation_run_create_params
+from ...types.agents.evaluation_run_create_response import EvaluationRunCreateResponse
+from ...types.agents.evaluation_run_retrieve_response import EvaluationRunRetrieveResponse
+from ...types.agents.evaluation_run_list_results_response import EvaluationRunListResultsResponse
__all__ = ["EvaluationRunsResource", "AsyncEvaluationRunsResource"]
class EvaluationRunsResource(SyncAPIResource):
- @cached_property
- def results(self) -> ResultsResource:
- return ResultsResource(self._client)
-
@cached_property
def with_raw_response(self) -> EvaluationRunsResourceWithRawResponse:
"""
@@ -140,12 +129,46 @@ def retrieve(
cast_to=EvaluationRunRetrieveResponse,
)
+ def list_results(
+ self,
+ evaluation_run_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> EvaluationRunListResultsResponse:
+ """
+ To retrieve results of an evaluation run, send a GET request to
+ `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not evaluation_run_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
+ )
+ return self._get(
+ f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationRunListResultsResponse,
+ )
-class AsyncEvaluationRunsResource(AsyncAPIResource):
- @cached_property
- def results(self) -> AsyncResultsResource:
- return AsyncResultsResource(self._client)
+class AsyncEvaluationRunsResource(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncEvaluationRunsResourceWithRawResponse:
"""
@@ -251,6 +274,44 @@ async def retrieve(
cast_to=EvaluationRunRetrieveResponse,
)
+ async def list_results(
+ self,
+ evaluation_run_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> EvaluationRunListResultsResponse:
+ """
+ To retrieve results of an evaluation run, send a GET request to
+ `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not evaluation_run_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
+ )
+ return await self._get(
+ f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationRunListResultsResponse,
+ )
+
class EvaluationRunsResourceWithRawResponse:
def __init__(self, evaluation_runs: EvaluationRunsResource) -> None:
@@ -262,10 +323,9 @@ def __init__(self, evaluation_runs: EvaluationRunsResource) -> None:
self.retrieve = to_raw_response_wrapper(
evaluation_runs.retrieve,
)
-
- @cached_property
- def results(self) -> ResultsResourceWithRawResponse:
- return ResultsResourceWithRawResponse(self._evaluation_runs.results)
+ self.list_results = to_raw_response_wrapper(
+ evaluation_runs.list_results,
+ )
class AsyncEvaluationRunsResourceWithRawResponse:
@@ -278,10 +338,9 @@ def __init__(self, evaluation_runs: AsyncEvaluationRunsResource) -> None:
self.retrieve = async_to_raw_response_wrapper(
evaluation_runs.retrieve,
)
-
- @cached_property
- def results(self) -> AsyncResultsResourceWithRawResponse:
- return AsyncResultsResourceWithRawResponse(self._evaluation_runs.results)
+ self.list_results = async_to_raw_response_wrapper(
+ evaluation_runs.list_results,
+ )
class EvaluationRunsResourceWithStreamingResponse:
@@ -294,10 +353,9 @@ def __init__(self, evaluation_runs: EvaluationRunsResource) -> None:
self.retrieve = to_streamed_response_wrapper(
evaluation_runs.retrieve,
)
-
- @cached_property
- def results(self) -> ResultsResourceWithStreamingResponse:
- return ResultsResourceWithStreamingResponse(self._evaluation_runs.results)
+ self.list_results = to_streamed_response_wrapper(
+ evaluation_runs.list_results,
+ )
class AsyncEvaluationRunsResourceWithStreamingResponse:
@@ -310,7 +368,6 @@ def __init__(self, evaluation_runs: AsyncEvaluationRunsResource) -> None:
self.retrieve = async_to_streamed_response_wrapper(
evaluation_runs.retrieve,
)
-
- @cached_property
- def results(self) -> AsyncResultsResourceWithStreamingResponse:
- return AsyncResultsResourceWithStreamingResponse(self._evaluation_runs.results)
+ self.list_results = async_to_streamed_response_wrapper(
+ evaluation_runs.list_results,
+ )
diff --git a/src/do_gradientai/resources/regions/evaluation_test_cases.py b/src/do_gradientai/resources/agents/evaluation_test_cases.py
similarity index 97%
rename from src/do_gradientai/resources/regions/evaluation_test_cases.py
rename to src/do_gradientai/resources/agents/evaluation_test_cases.py
index eed4d8b4..995df025 100644
--- a/src/do_gradientai/resources/regions/evaluation_test_cases.py
+++ b/src/do_gradientai/resources/agents/evaluation_test_cases.py
@@ -17,17 +17,17 @@
async_to_streamed_response_wrapper,
)
from ..._base_client import make_request_options
-from ...types.regions import (
+from ...types.agents import (
evaluation_test_case_create_params,
evaluation_test_case_update_params,
evaluation_test_case_list_evaluation_runs_params,
)
-from ...types.regions.api_star_metric_param import APIStarMetricParam
-from ...types.regions.evaluation_test_case_list_response import EvaluationTestCaseListResponse
-from ...types.regions.evaluation_test_case_create_response import EvaluationTestCaseCreateResponse
-from ...types.regions.evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse
-from ...types.regions.evaluation_test_case_retrieve_response import EvaluationTestCaseRetrieveResponse
-from ...types.regions.evaluation_test_case_list_evaluation_runs_response import (
+from ...types.agents.api_star_metric_param import APIStarMetricParam
+from ...types.agents.evaluation_test_case_list_response import EvaluationTestCaseListResponse
+from ...types.agents.evaluation_test_case_create_response import EvaluationTestCaseCreateResponse
+from ...types.agents.evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse
+from ...types.agents.evaluation_test_case_retrieve_response import EvaluationTestCaseRetrieveResponse
+from ...types.agents.evaluation_test_case_list_evaluation_runs_response import (
EvaluationTestCaseListEvaluationRunsResponse,
)
diff --git a/src/do_gradientai/resources/regions.py b/src/do_gradientai/resources/regions.py
new file mode 100644
index 00000000..4c50d9e6
--- /dev/null
+++ b/src/do_gradientai/resources/regions.py
@@ -0,0 +1,195 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..types import region_list_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._utils import maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .._base_client import make_request_options
+from ..types.region_list_response import RegionListResponse
+
+__all__ = ["RegionsResource", "AsyncRegionsResource"]
+
+
+class RegionsResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> RegionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return RegionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> RegionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return RegionsResourceWithStreamingResponse(self)
+
+ def list(
+ self,
+ *,
+ serves_batch: bool | NotGiven = NOT_GIVEN,
+ serves_inference: bool | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> RegionListResponse:
+ """
+ To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`.
+
+ Args:
+ serves_batch: include datacenters that are capable of running batch jobs.
+
+ serves_inference: include datacenters that serve inference.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/gen-ai/regions"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/regions",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "serves_batch": serves_batch,
+ "serves_inference": serves_inference,
+ },
+ region_list_params.RegionListParams,
+ ),
+ ),
+ cast_to=RegionListResponse,
+ )
+
+
+class AsyncRegionsResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncRegionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncRegionsResourceWithStreamingResponse(self)
+
+ async def list(
+ self,
+ *,
+ serves_batch: bool | NotGiven = NOT_GIVEN,
+ serves_inference: bool | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> RegionListResponse:
+ """
+ To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`.
+
+ Args:
+ serves_batch: include datacenters that are capable of running batch jobs.
+
+ serves_inference: include datacenters that serve inference.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/gen-ai/regions"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/regions",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "serves_batch": serves_batch,
+ "serves_inference": serves_inference,
+ },
+ region_list_params.RegionListParams,
+ ),
+ ),
+ cast_to=RegionListResponse,
+ )
+
+
+class RegionsResourceWithRawResponse:
+ def __init__(self, regions: RegionsResource) -> None:
+ self._regions = regions
+
+ self.list = to_raw_response_wrapper(
+ regions.list,
+ )
+
+
+class AsyncRegionsResourceWithRawResponse:
+ def __init__(self, regions: AsyncRegionsResource) -> None:
+ self._regions = regions
+
+ self.list = async_to_raw_response_wrapper(
+ regions.list,
+ )
+
+
+class RegionsResourceWithStreamingResponse:
+ def __init__(self, regions: RegionsResource) -> None:
+ self._regions = regions
+
+ self.list = to_streamed_response_wrapper(
+ regions.list,
+ )
+
+
+class AsyncRegionsResourceWithStreamingResponse:
+ def __init__(self, regions: AsyncRegionsResource) -> None:
+ self._regions = regions
+
+ self.list = async_to_streamed_response_wrapper(
+ regions.list,
+ )
diff --git a/src/do_gradientai/resources/regions/__init__.py b/src/do_gradientai/resources/regions/__init__.py
deleted file mode 100644
index 51a96d61..00000000
--- a/src/do_gradientai/resources/regions/__init__.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .regions import (
- RegionsResource,
- AsyncRegionsResource,
- RegionsResourceWithRawResponse,
- AsyncRegionsResourceWithRawResponse,
- RegionsResourceWithStreamingResponse,
- AsyncRegionsResourceWithStreamingResponse,
-)
-from .evaluation_runs import (
- EvaluationRunsResource,
- AsyncEvaluationRunsResource,
- EvaluationRunsResourceWithRawResponse,
- AsyncEvaluationRunsResourceWithRawResponse,
- EvaluationRunsResourceWithStreamingResponse,
- AsyncEvaluationRunsResourceWithStreamingResponse,
-)
-from .evaluation_datasets import (
- EvaluationDatasetsResource,
- AsyncEvaluationDatasetsResource,
- EvaluationDatasetsResourceWithRawResponse,
- AsyncEvaluationDatasetsResourceWithRawResponse,
- EvaluationDatasetsResourceWithStreamingResponse,
- AsyncEvaluationDatasetsResourceWithStreamingResponse,
-)
-from .evaluation_test_cases import (
- EvaluationTestCasesResource,
- AsyncEvaluationTestCasesResource,
- EvaluationTestCasesResourceWithRawResponse,
- AsyncEvaluationTestCasesResourceWithRawResponse,
- EvaluationTestCasesResourceWithStreamingResponse,
- AsyncEvaluationTestCasesResourceWithStreamingResponse,
-)
-
-__all__ = [
- "EvaluationRunsResource",
- "AsyncEvaluationRunsResource",
- "EvaluationRunsResourceWithRawResponse",
- "AsyncEvaluationRunsResourceWithRawResponse",
- "EvaluationRunsResourceWithStreamingResponse",
- "AsyncEvaluationRunsResourceWithStreamingResponse",
- "EvaluationTestCasesResource",
- "AsyncEvaluationTestCasesResource",
- "EvaluationTestCasesResourceWithRawResponse",
- "AsyncEvaluationTestCasesResourceWithRawResponse",
- "EvaluationTestCasesResourceWithStreamingResponse",
- "AsyncEvaluationTestCasesResourceWithStreamingResponse",
- "EvaluationDatasetsResource",
- "AsyncEvaluationDatasetsResource",
- "EvaluationDatasetsResourceWithRawResponse",
- "AsyncEvaluationDatasetsResourceWithRawResponse",
- "EvaluationDatasetsResourceWithStreamingResponse",
- "AsyncEvaluationDatasetsResourceWithStreamingResponse",
- "RegionsResource",
- "AsyncRegionsResource",
- "RegionsResourceWithRawResponse",
- "AsyncRegionsResourceWithRawResponse",
- "RegionsResourceWithStreamingResponse",
- "AsyncRegionsResourceWithStreamingResponse",
-]
diff --git a/src/do_gradientai/resources/regions/evaluation_runs/__init__.py b/src/do_gradientai/resources/regions/evaluation_runs/__init__.py
deleted file mode 100644
index e5580dd0..00000000
--- a/src/do_gradientai/resources/regions/evaluation_runs/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .results import (
- ResultsResource,
- AsyncResultsResource,
- ResultsResourceWithRawResponse,
- AsyncResultsResourceWithRawResponse,
- ResultsResourceWithStreamingResponse,
- AsyncResultsResourceWithStreamingResponse,
-)
-from .evaluation_runs import (
- EvaluationRunsResource,
- AsyncEvaluationRunsResource,
- EvaluationRunsResourceWithRawResponse,
- AsyncEvaluationRunsResourceWithRawResponse,
- EvaluationRunsResourceWithStreamingResponse,
- AsyncEvaluationRunsResourceWithStreamingResponse,
-)
-
-__all__ = [
- "ResultsResource",
- "AsyncResultsResource",
- "ResultsResourceWithRawResponse",
- "AsyncResultsResourceWithRawResponse",
- "ResultsResourceWithStreamingResponse",
- "AsyncResultsResourceWithStreamingResponse",
- "EvaluationRunsResource",
- "AsyncEvaluationRunsResource",
- "EvaluationRunsResourceWithRawResponse",
- "AsyncEvaluationRunsResourceWithRawResponse",
- "EvaluationRunsResourceWithStreamingResponse",
- "AsyncEvaluationRunsResourceWithStreamingResponse",
-]
diff --git a/src/do_gradientai/resources/regions/evaluation_runs/results.py b/src/do_gradientai/resources/regions/evaluation_runs/results.py
deleted file mode 100644
index ad74a778..00000000
--- a/src/do_gradientai/resources/regions/evaluation_runs/results.py
+++ /dev/null
@@ -1,264 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.regions.evaluation_runs.result_retrieve_response import ResultRetrieveResponse
-from ....types.regions.evaluation_runs.result_retrieve_prompt_response import ResultRetrievePromptResponse
-
-__all__ = ["ResultsResource", "AsyncResultsResource"]
-
-
-class ResultsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> ResultsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return ResultsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ResultsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return ResultsResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- evaluation_run_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ResultRetrieveResponse:
- """
- To retrieve results of an evaluation run, send a GET request to
- `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not evaluation_run_uuid:
- raise ValueError(
- f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
- )
- return self._get(
- f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ResultRetrieveResponse,
- )
-
- def retrieve_prompt(
- self,
- prompt_id: int,
- *,
- evaluation_run_uuid: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ResultRetrievePromptResponse:
- """
- To retrieve results of an evaluation run, send a GET request to
- `/v2/genai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not evaluation_run_uuid:
- raise ValueError(
- f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
- )
- return self._get(
- f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ResultRetrievePromptResponse,
- )
-
-
-class AsyncResultsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncResultsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return AsyncResultsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncResultsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return AsyncResultsResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- evaluation_run_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ResultRetrieveResponse:
- """
- To retrieve results of an evaluation run, send a GET request to
- `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not evaluation_run_uuid:
- raise ValueError(
- f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
- )
- return await self._get(
- f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ResultRetrieveResponse,
- )
-
- async def retrieve_prompt(
- self,
- prompt_id: int,
- *,
- evaluation_run_uuid: str,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ResultRetrievePromptResponse:
- """
- To retrieve results of an evaluation run, send a GET request to
- `/v2/genai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not evaluation_run_uuid:
- raise ValueError(
- f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
- )
- return await self._get(
- f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ResultRetrievePromptResponse,
- )
-
-
-class ResultsResourceWithRawResponse:
- def __init__(self, results: ResultsResource) -> None:
- self._results = results
-
- self.retrieve = to_raw_response_wrapper(
- results.retrieve,
- )
- self.retrieve_prompt = to_raw_response_wrapper(
- results.retrieve_prompt,
- )
-
-
-class AsyncResultsResourceWithRawResponse:
- def __init__(self, results: AsyncResultsResource) -> None:
- self._results = results
-
- self.retrieve = async_to_raw_response_wrapper(
- results.retrieve,
- )
- self.retrieve_prompt = async_to_raw_response_wrapper(
- results.retrieve_prompt,
- )
-
-
-class ResultsResourceWithStreamingResponse:
- def __init__(self, results: ResultsResource) -> None:
- self._results = results
-
- self.retrieve = to_streamed_response_wrapper(
- results.retrieve,
- )
- self.retrieve_prompt = to_streamed_response_wrapper(
- results.retrieve_prompt,
- )
-
-
-class AsyncResultsResourceWithStreamingResponse:
- def __init__(self, results: AsyncResultsResource) -> None:
- self._results = results
-
- self.retrieve = async_to_streamed_response_wrapper(
- results.retrieve,
- )
- self.retrieve_prompt = async_to_streamed_response_wrapper(
- results.retrieve_prompt,
- )
diff --git a/src/do_gradientai/resources/regions/regions.py b/src/do_gradientai/resources/regions/regions.py
deleted file mode 100644
index 5f74b2e8..00000000
--- a/src/do_gradientai/resources/regions/regions.py
+++ /dev/null
@@ -1,352 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...types import region_list_params
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from .evaluation_datasets import (
- EvaluationDatasetsResource,
- AsyncEvaluationDatasetsResource,
- EvaluationDatasetsResourceWithRawResponse,
- AsyncEvaluationDatasetsResourceWithRawResponse,
- EvaluationDatasetsResourceWithStreamingResponse,
- AsyncEvaluationDatasetsResourceWithStreamingResponse,
-)
-from .evaluation_test_cases import (
- EvaluationTestCasesResource,
- AsyncEvaluationTestCasesResource,
- EvaluationTestCasesResourceWithRawResponse,
- AsyncEvaluationTestCasesResourceWithRawResponse,
- EvaluationTestCasesResourceWithStreamingResponse,
- AsyncEvaluationTestCasesResourceWithStreamingResponse,
-)
-from ...types.region_list_response import RegionListResponse
-from .evaluation_runs.evaluation_runs import (
- EvaluationRunsResource,
- AsyncEvaluationRunsResource,
- EvaluationRunsResourceWithRawResponse,
- AsyncEvaluationRunsResourceWithRawResponse,
- EvaluationRunsResourceWithStreamingResponse,
- AsyncEvaluationRunsResourceWithStreamingResponse,
-)
-from ...types.region_list_evaluation_metrics_response import RegionListEvaluationMetricsResponse
-
-__all__ = ["RegionsResource", "AsyncRegionsResource"]
-
-
-class RegionsResource(SyncAPIResource):
- @cached_property
- def evaluation_runs(self) -> EvaluationRunsResource:
- return EvaluationRunsResource(self._client)
-
- @cached_property
- def evaluation_test_cases(self) -> EvaluationTestCasesResource:
- return EvaluationTestCasesResource(self._client)
-
- @cached_property
- def evaluation_datasets(self) -> EvaluationDatasetsResource:
- return EvaluationDatasetsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> RegionsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return RegionsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> RegionsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return RegionsResourceWithStreamingResponse(self)
-
- def list(
- self,
- *,
- serves_batch: bool | NotGiven = NOT_GIVEN,
- serves_inference: bool | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RegionListResponse:
- """
- To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`.
-
- Args:
- serves_batch: include datacenters that are capable of running batch jobs.
-
- serves_inference: include datacenters that serve inference.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/v2/gen-ai/regions"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/regions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "serves_batch": serves_batch,
- "serves_inference": serves_inference,
- },
- region_list_params.RegionListParams,
- ),
- ),
- cast_to=RegionListResponse,
- )
-
- def list_evaluation_metrics(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RegionListEvaluationMetricsResponse:
- """
- To list all evaluation metrics, send a GET request to
- `/v2/gen-ai/evaluation_metrics`.
- """
- return self._get(
- "/v2/gen-ai/evaluation_metrics"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/evaluation_metrics",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RegionListEvaluationMetricsResponse,
- )
-
-
-class AsyncRegionsResource(AsyncAPIResource):
- @cached_property
- def evaluation_runs(self) -> AsyncEvaluationRunsResource:
- return AsyncEvaluationRunsResource(self._client)
-
- @cached_property
- def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResource:
- return AsyncEvaluationTestCasesResource(self._client)
-
- @cached_property
- def evaluation_datasets(self) -> AsyncEvaluationDatasetsResource:
- return AsyncEvaluationDatasetsResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncRegionsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return AsyncRegionsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return AsyncRegionsResourceWithStreamingResponse(self)
-
- async def list(
- self,
- *,
- serves_batch: bool | NotGiven = NOT_GIVEN,
- serves_inference: bool | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RegionListResponse:
- """
- To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`.
-
- Args:
- serves_batch: include datacenters that are capable of running batch jobs.
-
- serves_inference: include datacenters that serve inference.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/v2/gen-ai/regions"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/regions",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "serves_batch": serves_batch,
- "serves_inference": serves_inference,
- },
- region_list_params.RegionListParams,
- ),
- ),
- cast_to=RegionListResponse,
- )
-
- async def list_evaluation_metrics(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> RegionListEvaluationMetricsResponse:
- """
- To list all evaluation metrics, send a GET request to
- `/v2/gen-ai/evaluation_metrics`.
- """
- return await self._get(
- "/v2/gen-ai/evaluation_metrics"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/evaluation_metrics",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=RegionListEvaluationMetricsResponse,
- )
-
-
-class RegionsResourceWithRawResponse:
- def __init__(self, regions: RegionsResource) -> None:
- self._regions = regions
-
- self.list = to_raw_response_wrapper(
- regions.list,
- )
- self.list_evaluation_metrics = to_raw_response_wrapper(
- regions.list_evaluation_metrics,
- )
-
- @cached_property
- def evaluation_runs(self) -> EvaluationRunsResourceWithRawResponse:
- return EvaluationRunsResourceWithRawResponse(self._regions.evaluation_runs)
-
- @cached_property
- def evaluation_test_cases(self) -> EvaluationTestCasesResourceWithRawResponse:
- return EvaluationTestCasesResourceWithRawResponse(self._regions.evaluation_test_cases)
-
- @cached_property
- def evaluation_datasets(self) -> EvaluationDatasetsResourceWithRawResponse:
- return EvaluationDatasetsResourceWithRawResponse(self._regions.evaluation_datasets)
-
-
-class AsyncRegionsResourceWithRawResponse:
- def __init__(self, regions: AsyncRegionsResource) -> None:
- self._regions = regions
-
- self.list = async_to_raw_response_wrapper(
- regions.list,
- )
- self.list_evaluation_metrics = async_to_raw_response_wrapper(
- regions.list_evaluation_metrics,
- )
-
- @cached_property
- def evaluation_runs(self) -> AsyncEvaluationRunsResourceWithRawResponse:
- return AsyncEvaluationRunsResourceWithRawResponse(self._regions.evaluation_runs)
-
- @cached_property
- def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResourceWithRawResponse:
- return AsyncEvaluationTestCasesResourceWithRawResponse(self._regions.evaluation_test_cases)
-
- @cached_property
- def evaluation_datasets(self) -> AsyncEvaluationDatasetsResourceWithRawResponse:
- return AsyncEvaluationDatasetsResourceWithRawResponse(self._regions.evaluation_datasets)
-
-
-class RegionsResourceWithStreamingResponse:
- def __init__(self, regions: RegionsResource) -> None:
- self._regions = regions
-
- self.list = to_streamed_response_wrapper(
- regions.list,
- )
- self.list_evaluation_metrics = to_streamed_response_wrapper(
- regions.list_evaluation_metrics,
- )
-
- @cached_property
- def evaluation_runs(self) -> EvaluationRunsResourceWithStreamingResponse:
- return EvaluationRunsResourceWithStreamingResponse(self._regions.evaluation_runs)
-
- @cached_property
- def evaluation_test_cases(self) -> EvaluationTestCasesResourceWithStreamingResponse:
- return EvaluationTestCasesResourceWithStreamingResponse(self._regions.evaluation_test_cases)
-
- @cached_property
- def evaluation_datasets(self) -> EvaluationDatasetsResourceWithStreamingResponse:
- return EvaluationDatasetsResourceWithStreamingResponse(self._regions.evaluation_datasets)
-
-
-class AsyncRegionsResourceWithStreamingResponse:
- def __init__(self, regions: AsyncRegionsResource) -> None:
- self._regions = regions
-
- self.list = async_to_streamed_response_wrapper(
- regions.list,
- )
- self.list_evaluation_metrics = async_to_streamed_response_wrapper(
- regions.list_evaluation_metrics,
- )
-
- @cached_property
- def evaluation_runs(self) -> AsyncEvaluationRunsResourceWithStreamingResponse:
- return AsyncEvaluationRunsResourceWithStreamingResponse(self._regions.evaluation_runs)
-
- @cached_property
- def evaluation_test_cases(self) -> AsyncEvaluationTestCasesResourceWithStreamingResponse:
- return AsyncEvaluationTestCasesResourceWithStreamingResponse(self._regions.evaluation_test_cases)
-
- @cached_property
- def evaluation_datasets(self) -> AsyncEvaluationDatasetsResourceWithStreamingResponse:
- return AsyncEvaluationDatasetsResourceWithStreamingResponse(self._regions.evaluation_datasets)
diff --git a/src/do_gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py
index d09aaa2a..e3c2ab9c 100644
--- a/src/do_gradientai/types/__init__.py
+++ b/src/do_gradientai/types/__init__.py
@@ -22,7 +22,6 @@
from .agent_create_response import AgentCreateResponse as AgentCreateResponse
from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse
from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse
-from .api_evaluation_metric import APIEvaluationMetric as APIEvaluationMetric
from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo
from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse
from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo
@@ -45,9 +44,6 @@
from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse
from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams
from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse
-from .region_list_evaluation_metrics_response import (
- RegionListEvaluationMetricsResponse as RegionListEvaluationMetricsResponse,
-)
from .indexing_job_retrieve_data_sources_response import (
IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse,
)
diff --git a/src/do_gradientai/types/agents/__init__.py b/src/do_gradientai/types/agents/__init__.py
index aae0ee6b..7e100741 100644
--- a/src/do_gradientai/types/agents/__init__.py
+++ b/src/do_gradientai/types/agents/__init__.py
@@ -4,11 +4,16 @@
from .api_meta import APIMeta as APIMeta
from .api_links import APILinks as APILinks
+from .api_star_metric import APIStarMetric as APIStarMetric
+from .api_evaluation_run import APIEvaluationRun as APIEvaluationRun
from .api_key_list_params import APIKeyListParams as APIKeyListParams
from .version_list_params import VersionListParams as VersionListParams
+from .api_evaluation_metric import APIEvaluationMetric as APIEvaluationMetric
+from .api_evaluation_prompt import APIEvaluationPrompt as APIEvaluationPrompt
from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams
from .api_key_list_response import APIKeyListResponse as APIKeyListResponse
from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams
+from .api_star_metric_param import APIStarMetricParam as APIStarMetricParam
from .version_list_response import VersionListResponse as VersionListResponse
from .version_update_params import VersionUpdateParams as VersionUpdateParams
from .child_agent_add_params import ChildAgentAddParams as ChildAgentAddParams
@@ -18,6 +23,7 @@
from .api_key_delete_response import APIKeyDeleteResponse as APIKeyDeleteResponse
from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse
from .version_update_response import VersionUpdateResponse as VersionUpdateResponse
+from .api_evaluation_test_case import APIEvaluationTestCase as APIEvaluationTestCase
from .child_agent_add_response import ChildAgentAddResponse as ChildAgentAddResponse
from .function_create_response import FunctionCreateResponse as FunctionCreateResponse
from .function_delete_response import FunctionDeleteResponse as FunctionDeleteResponse
@@ -27,5 +33,33 @@
from .api_key_regenerate_response import APIKeyRegenerateResponse as APIKeyRegenerateResponse
from .child_agent_delete_response import ChildAgentDeleteResponse as ChildAgentDeleteResponse
from .child_agent_update_response import ChildAgentUpdateResponse as ChildAgentUpdateResponse
+from .api_evaluation_metric_result import APIEvaluationMetricResult as APIEvaluationMetricResult
+from .evaluation_run_create_params import EvaluationRunCreateParams as EvaluationRunCreateParams
from .api_link_knowledge_base_output import APILinkKnowledgeBaseOutput as APILinkKnowledgeBaseOutput
+from .evaluation_run_create_response import EvaluationRunCreateResponse as EvaluationRunCreateResponse
from .knowledge_base_detach_response import KnowledgeBaseDetachResponse as KnowledgeBaseDetachResponse
+from .evaluation_metric_list_response import EvaluationMetricListResponse as EvaluationMetricListResponse
+from .evaluation_dataset_create_params import EvaluationDatasetCreateParams as EvaluationDatasetCreateParams
+from .evaluation_run_retrieve_response import EvaluationRunRetrieveResponse as EvaluationRunRetrieveResponse
+from .evaluation_dataset_create_response import EvaluationDatasetCreateResponse as EvaluationDatasetCreateResponse
+from .evaluation_test_case_create_params import EvaluationTestCaseCreateParams as EvaluationTestCaseCreateParams
+from .evaluation_test_case_list_response import EvaluationTestCaseListResponse as EvaluationTestCaseListResponse
+from .evaluation_test_case_update_params import EvaluationTestCaseUpdateParams as EvaluationTestCaseUpdateParams
+from .evaluation_run_list_results_response import EvaluationRunListResultsResponse as EvaluationRunListResultsResponse
+from .evaluation_test_case_create_response import EvaluationTestCaseCreateResponse as EvaluationTestCaseCreateResponse
+from .evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse as EvaluationTestCaseUpdateResponse
+from .evaluation_test_case_retrieve_response import (
+ EvaluationTestCaseRetrieveResponse as EvaluationTestCaseRetrieveResponse,
+)
+from .evaluation_test_case_list_evaluation_runs_params import (
+ EvaluationTestCaseListEvaluationRunsParams as EvaluationTestCaseListEvaluationRunsParams,
+)
+from .evaluation_test_case_list_evaluation_runs_response import (
+ EvaluationTestCaseListEvaluationRunsResponse as EvaluationTestCaseListEvaluationRunsResponse,
+)
+from .evaluation_dataset_create_file_upload_presigned_urls_params import (
+ EvaluationDatasetCreateFileUploadPresignedURLsParams as EvaluationDatasetCreateFileUploadPresignedURLsParams,
+)
+from .evaluation_dataset_create_file_upload_presigned_urls_response import (
+ EvaluationDatasetCreateFileUploadPresignedURLsResponse as EvaluationDatasetCreateFileUploadPresignedURLsResponse,
+)
diff --git a/src/do_gradientai/types/api_evaluation_metric.py b/src/do_gradientai/types/agents/api_evaluation_metric.py
similarity index 95%
rename from src/do_gradientai/types/api_evaluation_metric.py
rename to src/do_gradientai/types/agents/api_evaluation_metric.py
index 05390297..1aa85306 100644
--- a/src/do_gradientai/types/api_evaluation_metric.py
+++ b/src/do_gradientai/types/agents/api_evaluation_metric.py
@@ -3,7 +3,7 @@
from typing import Optional
from typing_extensions import Literal
-from .._models import BaseModel
+from ..._models import BaseModel
__all__ = ["APIEvaluationMetric"]
diff --git a/src/do_gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py b/src/do_gradientai/types/agents/api_evaluation_metric_result.py
similarity index 92%
rename from src/do_gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py
rename to src/do_gradientai/types/agents/api_evaluation_metric_result.py
index cb50fd80..35146c00 100644
--- a/src/do_gradientai/types/regions/evaluation_runs/api_evaluation_metric_result.py
+++ b/src/do_gradientai/types/agents/api_evaluation_metric_result.py
@@ -2,7 +2,7 @@
from typing import Optional
-from ...._models import BaseModel
+from ..._models import BaseModel
__all__ = ["APIEvaluationMetricResult"]
diff --git a/src/do_gradientai/types/regions/evaluation_runs/api_prompt.py b/src/do_gradientai/types/agents/api_evaluation_prompt.py
similarity index 90%
rename from src/do_gradientai/types/regions/evaluation_runs/api_prompt.py
rename to src/do_gradientai/types/agents/api_evaluation_prompt.py
index fb5a51f4..750e62fb 100644
--- a/src/do_gradientai/types/regions/evaluation_runs/api_prompt.py
+++ b/src/do_gradientai/types/agents/api_evaluation_prompt.py
@@ -2,10 +2,10 @@
from typing import List, Optional
-from ...._models import BaseModel
+from ..._models import BaseModel
from .api_evaluation_metric_result import APIEvaluationMetricResult
-__all__ = ["APIPrompt", "PromptChunk"]
+__all__ = ["APIEvaluationPrompt", "PromptChunk"]
class PromptChunk(BaseModel):
@@ -25,7 +25,7 @@ class PromptChunk(BaseModel):
"""Text content of the chunk."""
-class APIPrompt(BaseModel):
+class APIEvaluationPrompt(BaseModel):
ground_truth: Optional[str] = None
"""The ground truth for the prompt."""
diff --git a/src/do_gradientai/types/regions/evaluation_runs/api_evaluation_run.py b/src/do_gradientai/types/agents/api_evaluation_run.py
similarity index 97%
rename from src/do_gradientai/types/regions/evaluation_runs/api_evaluation_run.py
rename to src/do_gradientai/types/agents/api_evaluation_run.py
index 7822f53c..ae046d3e 100644
--- a/src/do_gradientai/types/regions/evaluation_runs/api_evaluation_run.py
+++ b/src/do_gradientai/types/agents/api_evaluation_run.py
@@ -4,7 +4,7 @@
from datetime import datetime
from typing_extensions import Literal
-from ...._models import BaseModel
+from ..._models import BaseModel
from .api_evaluation_metric_result import APIEvaluationMetricResult
__all__ = ["APIEvaluationRun"]
diff --git a/src/do_gradientai/types/regions/api_evaluation_test_case.py b/src/do_gradientai/types/agents/api_evaluation_test_case.py
similarity index 94%
rename from src/do_gradientai/types/regions/api_evaluation_test_case.py
rename to src/do_gradientai/types/agents/api_evaluation_test_case.py
index d799b0e0..09ce5e48 100644
--- a/src/do_gradientai/types/regions/api_evaluation_test_case.py
+++ b/src/do_gradientai/types/agents/api_evaluation_test_case.py
@@ -5,7 +5,7 @@
from ..._models import BaseModel
from .api_star_metric import APIStarMetric
-from ..api_evaluation_metric import APIEvaluationMetric
+from .api_evaluation_metric import APIEvaluationMetric
__all__ = ["APIEvaluationTestCase"]
diff --git a/src/do_gradientai/types/regions/api_star_metric.py b/src/do_gradientai/types/agents/api_star_metric.py
similarity index 100%
rename from src/do_gradientai/types/regions/api_star_metric.py
rename to src/do_gradientai/types/agents/api_star_metric.py
diff --git a/src/do_gradientai/types/regions/api_star_metric_param.py b/src/do_gradientai/types/agents/api_star_metric_param.py
similarity index 100%
rename from src/do_gradientai/types/regions/api_star_metric_param.py
rename to src/do_gradientai/types/agents/api_star_metric_param.py
diff --git a/src/do_gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py
similarity index 100%
rename from src/do_gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_params.py
rename to src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py
diff --git a/src/do_gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py b/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py
similarity index 100%
rename from src/do_gradientai/types/regions/evaluation_dataset_create_file_upload_presigned_urls_response.py
rename to src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py
diff --git a/src/do_gradientai/types/regions/evaluation_dataset_create_params.py b/src/do_gradientai/types/agents/evaluation_dataset_create_params.py
similarity index 100%
rename from src/do_gradientai/types/regions/evaluation_dataset_create_params.py
rename to src/do_gradientai/types/agents/evaluation_dataset_create_params.py
diff --git a/src/do_gradientai/types/regions/evaluation_dataset_create_response.py b/src/do_gradientai/types/agents/evaluation_dataset_create_response.py
similarity index 100%
rename from src/do_gradientai/types/regions/evaluation_dataset_create_response.py
rename to src/do_gradientai/types/agents/evaluation_dataset_create_response.py
diff --git a/src/do_gradientai/types/region_list_evaluation_metrics_response.py b/src/do_gradientai/types/agents/evaluation_metric_list_response.py
similarity index 63%
rename from src/do_gradientai/types/region_list_evaluation_metrics_response.py
rename to src/do_gradientai/types/agents/evaluation_metric_list_response.py
index c57b71d1..0708f1ba 100644
--- a/src/do_gradientai/types/region_list_evaluation_metrics_response.py
+++ b/src/do_gradientai/types/agents/evaluation_metric_list_response.py
@@ -2,11 +2,11 @@
from typing import List, Optional
-from .._models import BaseModel
+from ..._models import BaseModel
from .api_evaluation_metric import APIEvaluationMetric
-__all__ = ["RegionListEvaluationMetricsResponse"]
+__all__ = ["EvaluationMetricListResponse"]
-class RegionListEvaluationMetricsResponse(BaseModel):
+class EvaluationMetricListResponse(BaseModel):
metrics: Optional[List[APIEvaluationMetric]] = None
diff --git a/src/do_gradientai/types/regions/evaluation_run_create_params.py b/src/do_gradientai/types/agents/evaluation_run_create_params.py
similarity index 100%
rename from src/do_gradientai/types/regions/evaluation_run_create_params.py
rename to src/do_gradientai/types/agents/evaluation_run_create_params.py
diff --git a/src/do_gradientai/types/regions/evaluation_run_create_response.py b/src/do_gradientai/types/agents/evaluation_run_create_response.py
similarity index 100%
rename from src/do_gradientai/types/regions/evaluation_run_create_response.py
rename to src/do_gradientai/types/agents/evaluation_run_create_response.py
diff --git a/src/do_gradientai/types/regions/evaluation_runs/result_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_run_list_results_response.py
similarity index 52%
rename from src/do_gradientai/types/regions/evaluation_runs/result_retrieve_response.py
rename to src/do_gradientai/types/agents/evaluation_run_list_results_response.py
index 27256353..f0a9882b 100644
--- a/src/do_gradientai/types/regions/evaluation_runs/result_retrieve_response.py
+++ b/src/do_gradientai/types/agents/evaluation_run_list_results_response.py
@@ -2,15 +2,15 @@
from typing import List, Optional
-from ...._models import BaseModel
-from .api_prompt import APIPrompt
+from ..._models import BaseModel
from .api_evaluation_run import APIEvaluationRun
+from .api_evaluation_prompt import APIEvaluationPrompt
-__all__ = ["ResultRetrieveResponse"]
+__all__ = ["EvaluationRunListResultsResponse"]
-class ResultRetrieveResponse(BaseModel):
+class EvaluationRunListResultsResponse(BaseModel):
evaluation_run: Optional[APIEvaluationRun] = None
- prompts: Optional[List[APIPrompt]] = None
+ prompts: Optional[List[APIEvaluationPrompt]] = None
"""The prompt level results."""
diff --git a/src/do_gradientai/types/regions/evaluation_run_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_run_retrieve_response.py
similarity index 82%
rename from src/do_gradientai/types/regions/evaluation_run_retrieve_response.py
rename to src/do_gradientai/types/agents/evaluation_run_retrieve_response.py
index 68d71978..cedba220 100644
--- a/src/do_gradientai/types/regions/evaluation_run_retrieve_response.py
+++ b/src/do_gradientai/types/agents/evaluation_run_retrieve_response.py
@@ -3,7 +3,7 @@
from typing import Optional
from ..._models import BaseModel
-from .evaluation_runs.api_evaluation_run import APIEvaluationRun
+from .api_evaluation_run import APIEvaluationRun
__all__ = ["EvaluationRunRetrieveResponse"]
diff --git a/src/do_gradientai/types/regions/evaluation_test_case_create_params.py b/src/do_gradientai/types/agents/evaluation_test_case_create_params.py
similarity index 100%
rename from src/do_gradientai/types/regions/evaluation_test_case_create_params.py
rename to src/do_gradientai/types/agents/evaluation_test_case_create_params.py
diff --git a/src/do_gradientai/types/regions/evaluation_test_case_create_response.py b/src/do_gradientai/types/agents/evaluation_test_case_create_response.py
similarity index 100%
rename from src/do_gradientai/types/regions/evaluation_test_case_create_response.py
rename to src/do_gradientai/types/agents/evaluation_test_case_create_response.py
diff --git a/src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py b/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py
similarity index 100%
rename from src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_params.py
rename to src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py
diff --git a/src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py b/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py
similarity index 85%
rename from src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py
rename to src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py
index 4233d0ec..d9565e97 100644
--- a/src/do_gradientai/types/regions/evaluation_test_case_list_evaluation_runs_response.py
+++ b/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py
@@ -3,7 +3,7 @@
from typing import List, Optional
from ..._models import BaseModel
-from .evaluation_runs.api_evaluation_run import APIEvaluationRun
+from .api_evaluation_run import APIEvaluationRun
__all__ = ["EvaluationTestCaseListEvaluationRunsResponse"]
diff --git a/src/do_gradientai/types/regions/evaluation_test_case_list_response.py b/src/do_gradientai/types/agents/evaluation_test_case_list_response.py
similarity index 100%
rename from src/do_gradientai/types/regions/evaluation_test_case_list_response.py
rename to src/do_gradientai/types/agents/evaluation_test_case_list_response.py
diff --git a/src/do_gradientai/types/regions/evaluation_test_case_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/regions/evaluation_test_case_retrieve_response.py
rename to src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py
diff --git a/src/do_gradientai/types/regions/evaluation_test_case_update_params.py b/src/do_gradientai/types/agents/evaluation_test_case_update_params.py
similarity index 100%
rename from src/do_gradientai/types/regions/evaluation_test_case_update_params.py
rename to src/do_gradientai/types/agents/evaluation_test_case_update_params.py
diff --git a/src/do_gradientai/types/regions/evaluation_test_case_update_response.py b/src/do_gradientai/types/agents/evaluation_test_case_update_response.py
similarity index 100%
rename from src/do_gradientai/types/regions/evaluation_test_case_update_response.py
rename to src/do_gradientai/types/agents/evaluation_test_case_update_response.py
diff --git a/src/do_gradientai/types/api_workspace.py b/src/do_gradientai/types/api_workspace.py
index b170d504..83e59379 100644
--- a/src/do_gradientai/types/api_workspace.py
+++ b/src/do_gradientai/types/api_workspace.py
@@ -6,7 +6,7 @@
from datetime import datetime
from .._models import BaseModel
-from .regions.api_evaluation_test_case import APIEvaluationTestCase
+from .agents.api_evaluation_test_case import APIEvaluationTestCase
__all__ = ["APIWorkspace"]
diff --git a/src/do_gradientai/types/regions/__init__.py b/src/do_gradientai/types/regions/__init__.py
deleted file mode 100644
index 695ba3b4..00000000
--- a/src/do_gradientai/types/regions/__init__.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .api_star_metric import APIStarMetric as APIStarMetric
-from .api_star_metric_param import APIStarMetricParam as APIStarMetricParam
-from .api_evaluation_test_case import APIEvaluationTestCase as APIEvaluationTestCase
-from .evaluation_run_create_params import EvaluationRunCreateParams as EvaluationRunCreateParams
-from .evaluation_run_create_response import EvaluationRunCreateResponse as EvaluationRunCreateResponse
-from .evaluation_dataset_create_params import EvaluationDatasetCreateParams as EvaluationDatasetCreateParams
-from .evaluation_run_retrieve_response import EvaluationRunRetrieveResponse as EvaluationRunRetrieveResponse
-from .evaluation_dataset_create_response import EvaluationDatasetCreateResponse as EvaluationDatasetCreateResponse
-from .evaluation_test_case_create_params import EvaluationTestCaseCreateParams as EvaluationTestCaseCreateParams
-from .evaluation_test_case_list_response import EvaluationTestCaseListResponse as EvaluationTestCaseListResponse
-from .evaluation_test_case_update_params import EvaluationTestCaseUpdateParams as EvaluationTestCaseUpdateParams
-from .evaluation_test_case_create_response import EvaluationTestCaseCreateResponse as EvaluationTestCaseCreateResponse
-from .evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse as EvaluationTestCaseUpdateResponse
-from .evaluation_test_case_retrieve_response import (
- EvaluationTestCaseRetrieveResponse as EvaluationTestCaseRetrieveResponse,
-)
-from .evaluation_test_case_list_evaluation_runs_params import (
- EvaluationTestCaseListEvaluationRunsParams as EvaluationTestCaseListEvaluationRunsParams,
-)
-from .evaluation_test_case_list_evaluation_runs_response import (
- EvaluationTestCaseListEvaluationRunsResponse as EvaluationTestCaseListEvaluationRunsResponse,
-)
-from .evaluation_dataset_create_file_upload_presigned_urls_params import (
- EvaluationDatasetCreateFileUploadPresignedURLsParams as EvaluationDatasetCreateFileUploadPresignedURLsParams,
-)
-from .evaluation_dataset_create_file_upload_presigned_urls_response import (
- EvaluationDatasetCreateFileUploadPresignedURLsResponse as EvaluationDatasetCreateFileUploadPresignedURLsResponse,
-)
diff --git a/src/do_gradientai/types/regions/evaluation_runs/__init__.py b/src/do_gradientai/types/regions/evaluation_runs/__init__.py
deleted file mode 100644
index 0ec4f2f6..00000000
--- a/src/do_gradientai/types/regions/evaluation_runs/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .api_prompt import APIPrompt as APIPrompt
-from .api_evaluation_run import APIEvaluationRun as APIEvaluationRun
-from .result_retrieve_response import ResultRetrieveResponse as ResultRetrieveResponse
-from .api_evaluation_metric_result import APIEvaluationMetricResult as APIEvaluationMetricResult
-from .result_retrieve_prompt_response import ResultRetrievePromptResponse as ResultRetrievePromptResponse
diff --git a/src/do_gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py b/src/do_gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py
deleted file mode 100644
index ebebee48..00000000
--- a/src/do_gradientai/types/regions/evaluation_runs/result_retrieve_prompt_response.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ...._models import BaseModel
-from .api_prompt import APIPrompt
-
-__all__ = ["ResultRetrievePromptResponse"]
-
-
-class ResultRetrievePromptResponse(BaseModel):
- prompt: Optional[APIPrompt] = None
diff --git a/tests/api_resources/regions/test_evaluation_datasets.py b/tests/api_resources/agents/test_evaluation_datasets.py
similarity index 82%
rename from tests/api_resources/regions/test_evaluation_datasets.py
rename to tests/api_resources/agents/test_evaluation_datasets.py
index 6e7a5e52..9e6dad52 100644
--- a/tests/api_resources/regions/test_evaluation_datasets.py
+++ b/tests/api_resources/agents/test_evaluation_datasets.py
@@ -9,7 +9,7 @@
from tests.utils import assert_matches_type
from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.regions import (
+from do_gradientai.types.agents import (
EvaluationDatasetCreateResponse,
EvaluationDatasetCreateFileUploadPresignedURLsResponse,
)
@@ -23,13 +23,13 @@ class TestEvaluationDatasets:
@pytest.mark.skip()
@parametrize
def test_method_create(self, client: GradientAI) -> None:
- evaluation_dataset = client.regions.evaluation_datasets.create()
+ evaluation_dataset = client.agents.evaluation_datasets.create()
assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"])
@pytest.mark.skip()
@parametrize
def test_method_create_with_all_params(self, client: GradientAI) -> None:
- evaluation_dataset = client.regions.evaluation_datasets.create(
+ evaluation_dataset = client.agents.evaluation_datasets.create(
file_upload_dataset={
"original_file_name": "original_file_name",
"size_in_bytes": "size_in_bytes",
@@ -42,7 +42,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_create(self, client: GradientAI) -> None:
- response = client.regions.evaluation_datasets.with_raw_response.create()
+ response = client.agents.evaluation_datasets.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -52,7 +52,7 @@ def test_raw_response_create(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_create(self, client: GradientAI) -> None:
- with client.regions.evaluation_datasets.with_streaming_response.create() as response:
+ with client.agents.evaluation_datasets.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -64,7 +64,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_create_file_upload_presigned_urls(self, client: GradientAI) -> None:
- evaluation_dataset = client.regions.evaluation_datasets.create_file_upload_presigned_urls()
+ evaluation_dataset = client.agents.evaluation_datasets.create_file_upload_presigned_urls()
assert_matches_type(
EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"]
)
@@ -72,7 +72,7 @@ def test_method_create_file_upload_presigned_urls(self, client: GradientAI) -> N
@pytest.mark.skip()
@parametrize
def test_method_create_file_upload_presigned_urls_with_all_params(self, client: GradientAI) -> None:
- evaluation_dataset = client.regions.evaluation_datasets.create_file_upload_presigned_urls(
+ evaluation_dataset = client.agents.evaluation_datasets.create_file_upload_presigned_urls(
files=[
{
"file_name": "file_name",
@@ -87,7 +87,7 @@ def test_method_create_file_upload_presigned_urls_with_all_params(self, client:
@pytest.mark.skip()
@parametrize
def test_raw_response_create_file_upload_presigned_urls(self, client: GradientAI) -> None:
- response = client.regions.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls()
+ response = client.agents.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -99,7 +99,7 @@ def test_raw_response_create_file_upload_presigned_urls(self, client: GradientAI
@pytest.mark.skip()
@parametrize
def test_streaming_response_create_file_upload_presigned_urls(self, client: GradientAI) -> None:
- with client.regions.evaluation_datasets.with_streaming_response.create_file_upload_presigned_urls() as response:
+ with client.agents.evaluation_datasets.with_streaming_response.create_file_upload_presigned_urls() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -119,13 +119,13 @@ class TestAsyncEvaluationDatasets:
@pytest.mark.skip()
@parametrize
async def test_method_create(self, async_client: AsyncGradientAI) -> None:
- evaluation_dataset = await async_client.regions.evaluation_datasets.create()
+ evaluation_dataset = await async_client.agents.evaluation_datasets.create()
assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
- evaluation_dataset = await async_client.regions.evaluation_datasets.create(
+ evaluation_dataset = await async_client.agents.evaluation_datasets.create(
file_upload_dataset={
"original_file_name": "original_file_name",
"size_in_bytes": "size_in_bytes",
@@ -138,7 +138,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI
@pytest.mark.skip()
@parametrize
async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.regions.evaluation_datasets.with_raw_response.create()
+ response = await async_client.agents.evaluation_datasets.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -148,7 +148,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
- async with async_client.regions.evaluation_datasets.with_streaming_response.create() as response:
+ async with async_client.agents.evaluation_datasets.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -160,7 +160,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) ->
@pytest.mark.skip()
@parametrize
async def test_method_create_file_upload_presigned_urls(self, async_client: AsyncGradientAI) -> None:
- evaluation_dataset = await async_client.regions.evaluation_datasets.create_file_upload_presigned_urls()
+ evaluation_dataset = await async_client.agents.evaluation_datasets.create_file_upload_presigned_urls()
assert_matches_type(
EvaluationDatasetCreateFileUploadPresignedURLsResponse, evaluation_dataset, path=["response"]
)
@@ -170,7 +170,7 @@ async def test_method_create_file_upload_presigned_urls(self, async_client: Asyn
async def test_method_create_file_upload_presigned_urls_with_all_params(
self, async_client: AsyncGradientAI
) -> None:
- evaluation_dataset = await async_client.regions.evaluation_datasets.create_file_upload_presigned_urls(
+ evaluation_dataset = await async_client.agents.evaluation_datasets.create_file_upload_presigned_urls(
files=[
{
"file_name": "file_name",
@@ -185,7 +185,7 @@ async def test_method_create_file_upload_presigned_urls_with_all_params(
@pytest.mark.skip()
@parametrize
async def test_raw_response_create_file_upload_presigned_urls(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.regions.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls()
+ response = await async_client.agents.evaluation_datasets.with_raw_response.create_file_upload_presigned_urls()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -198,7 +198,7 @@ async def test_raw_response_create_file_upload_presigned_urls(self, async_client
@parametrize
async def test_streaming_response_create_file_upload_presigned_urls(self, async_client: AsyncGradientAI) -> None:
async with (
- async_client.regions.evaluation_datasets.with_streaming_response.create_file_upload_presigned_urls()
+ async_client.agents.evaluation_datasets.with_streaming_response.create_file_upload_presigned_urls()
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/agents/test_evaluation_metrics.py b/tests/api_resources/agents/test_evaluation_metrics.py
new file mode 100644
index 00000000..82084f61
--- /dev/null
+++ b/tests/api_resources/agents/test_evaluation_metrics.py
@@ -0,0 +1,80 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents import EvaluationMetricListResponse
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestEvaluationMetrics:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list(self, client: GradientAI) -> None:
+ evaluation_metric = client.agents.evaluation_metrics.list()
+ assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_list(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_metric = response.parse()
+ assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_list(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_metric = response.parse()
+ assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncEvaluationMetrics:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+ evaluation_metric = await async_client.agents.evaluation_metrics.list()
+ assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_metric = await response.parse()
+ assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_metric = await response.parse()
+ assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/regions/test_evaluation_runs.py b/tests/api_resources/agents/test_evaluation_runs.py
similarity index 56%
rename from tests/api_resources/regions/test_evaluation_runs.py
rename to tests/api_resources/agents/test_evaluation_runs.py
index 09bf8525..721be2a0 100644
--- a/tests/api_resources/regions/test_evaluation_runs.py
+++ b/tests/api_resources/agents/test_evaluation_runs.py
@@ -9,9 +9,10 @@
from tests.utils import assert_matches_type
from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.regions import (
+from do_gradientai.types.agents import (
EvaluationRunCreateResponse,
EvaluationRunRetrieveResponse,
+ EvaluationRunListResultsResponse,
)
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -23,13 +24,13 @@ class TestEvaluationRuns:
@pytest.mark.skip()
@parametrize
def test_method_create(self, client: GradientAI) -> None:
- evaluation_run = client.regions.evaluation_runs.create()
+ evaluation_run = client.agents.evaluation_runs.create()
assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])
@pytest.mark.skip()
@parametrize
def test_method_create_with_all_params(self, client: GradientAI) -> None:
- evaluation_run = client.regions.evaluation_runs.create(
+ evaluation_run = client.agents.evaluation_runs.create(
agent_uuid="agent_uuid",
run_name="run_name",
test_case_uuid="test_case_uuid",
@@ -39,7 +40,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_create(self, client: GradientAI) -> None:
- response = client.regions.evaluation_runs.with_raw_response.create()
+ response = client.agents.evaluation_runs.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -49,7 +50,7 @@ def test_raw_response_create(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_create(self, client: GradientAI) -> None:
- with client.regions.evaluation_runs.with_streaming_response.create() as response:
+ with client.agents.evaluation_runs.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -61,7 +62,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_retrieve(self, client: GradientAI) -> None:
- evaluation_run = client.regions.evaluation_runs.retrieve(
+ evaluation_run = client.agents.evaluation_runs.retrieve(
"evaluation_run_uuid",
)
assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"])
@@ -69,7 +70,7 @@ def test_method_retrieve(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_retrieve(self, client: GradientAI) -> None:
- response = client.regions.evaluation_runs.with_raw_response.retrieve(
+ response = client.agents.evaluation_runs.with_raw_response.retrieve(
"evaluation_run_uuid",
)
@@ -81,7 +82,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_retrieve(self, client: GradientAI) -> None:
- with client.regions.evaluation_runs.with_streaming_response.retrieve(
+ with client.agents.evaluation_runs.with_streaming_response.retrieve(
"evaluation_run_uuid",
) as response:
assert not response.is_closed
@@ -96,7 +97,49 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
@parametrize
def test_path_params_retrieve(self, client: GradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
- client.regions.evaluation_runs.with_raw_response.retrieve(
+ client.agents.evaluation_runs.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_results(self, client: GradientAI) -> None:
+ evaluation_run = client.agents.evaluation_runs.list_results(
+ "evaluation_run_uuid",
+ )
+ assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_list_results(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_runs.with_raw_response.list_results(
+ "evaluation_run_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_run = response.parse()
+ assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_list_results(self, client: GradientAI) -> None:
+ with client.agents.evaluation_runs.with_streaming_response.list_results(
+ "evaluation_run_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_run = response.parse()
+ assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_list_results(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
+ client.agents.evaluation_runs.with_raw_response.list_results(
"",
)
@@ -109,13 +152,13 @@ class TestAsyncEvaluationRuns:
@pytest.mark.skip()
@parametrize
async def test_method_create(self, async_client: AsyncGradientAI) -> None:
- evaluation_run = await async_client.regions.evaluation_runs.create()
+ evaluation_run = await async_client.agents.evaluation_runs.create()
assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
- evaluation_run = await async_client.regions.evaluation_runs.create(
+ evaluation_run = await async_client.agents.evaluation_runs.create(
agent_uuid="agent_uuid",
run_name="run_name",
test_case_uuid="test_case_uuid",
@@ -125,7 +168,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI
@pytest.mark.skip()
@parametrize
async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.regions.evaluation_runs.with_raw_response.create()
+ response = await async_client.agents.evaluation_runs.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -135,7 +178,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
- async with async_client.regions.evaluation_runs.with_streaming_response.create() as response:
+ async with async_client.agents.evaluation_runs.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -147,7 +190,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) ->
@pytest.mark.skip()
@parametrize
async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
- evaluation_run = await async_client.regions.evaluation_runs.retrieve(
+ evaluation_run = await async_client.agents.evaluation_runs.retrieve(
"evaluation_run_uuid",
)
assert_matches_type(EvaluationRunRetrieveResponse, evaluation_run, path=["response"])
@@ -155,7 +198,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.regions.evaluation_runs.with_raw_response.retrieve(
+ response = await async_client.agents.evaluation_runs.with_raw_response.retrieve(
"evaluation_run_uuid",
)
@@ -167,7 +210,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non
@pytest.mark.skip()
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- async with async_client.regions.evaluation_runs.with_streaming_response.retrieve(
+ async with async_client.agents.evaluation_runs.with_streaming_response.retrieve(
"evaluation_run_uuid",
) as response:
assert not response.is_closed
@@ -182,6 +225,48 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI)
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
- await async_client.regions.evaluation_runs.with_raw_response.retrieve(
+ await async_client.agents.evaluation_runs.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_results(self, async_client: AsyncGradientAI) -> None:
+ evaluation_run = await async_client.agents.evaluation_runs.list_results(
+ "evaluation_run_uuid",
+ )
+ assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_list_results(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_runs.with_raw_response.list_results(
+ "evaluation_run_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_run = await response.parse()
+ assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_list_results(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_runs.with_streaming_response.list_results(
+ "evaluation_run_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_run = await response.parse()
+ assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_list_results(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
+ await async_client.agents.evaluation_runs.with_raw_response.list_results(
"",
)
diff --git a/tests/api_resources/regions/test_evaluation_test_cases.py b/tests/api_resources/agents/test_evaluation_test_cases.py
similarity index 82%
rename from tests/api_resources/regions/test_evaluation_test_cases.py
rename to tests/api_resources/agents/test_evaluation_test_cases.py
index 7cc18835..50b285bd 100644
--- a/tests/api_resources/regions/test_evaluation_test_cases.py
+++ b/tests/api_resources/agents/test_evaluation_test_cases.py
@@ -9,7 +9,7 @@
from tests.utils import assert_matches_type
from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.regions import (
+from do_gradientai.types.agents import (
EvaluationTestCaseListResponse,
EvaluationTestCaseCreateResponse,
EvaluationTestCaseUpdateResponse,
@@ -26,13 +26,13 @@ class TestEvaluationTestCases:
@pytest.mark.skip()
@parametrize
def test_method_create(self, client: GradientAI) -> None:
- evaluation_test_case = client.regions.evaluation_test_cases.create()
+ evaluation_test_case = client.agents.evaluation_test_cases.create()
assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"])
@pytest.mark.skip()
@parametrize
def test_method_create_with_all_params(self, client: GradientAI) -> None:
- evaluation_test_case = client.regions.evaluation_test_cases.create(
+ evaluation_test_case = client.agents.evaluation_test_cases.create(
dataset_uuid="dataset_uuid",
description="description",
metrics=["string"],
@@ -49,7 +49,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_create(self, client: GradientAI) -> None:
- response = client.regions.evaluation_test_cases.with_raw_response.create()
+ response = client.agents.evaluation_test_cases.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -59,7 +59,7 @@ def test_raw_response_create(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_create(self, client: GradientAI) -> None:
- with client.regions.evaluation_test_cases.with_streaming_response.create() as response:
+ with client.agents.evaluation_test_cases.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -71,7 +71,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_retrieve(self, client: GradientAI) -> None:
- evaluation_test_case = client.regions.evaluation_test_cases.retrieve(
+ evaluation_test_case = client.agents.evaluation_test_cases.retrieve(
"test_case_uuid",
)
assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])
@@ -79,7 +79,7 @@ def test_method_retrieve(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_retrieve(self, client: GradientAI) -> None:
- response = client.regions.evaluation_test_cases.with_raw_response.retrieve(
+ response = client.agents.evaluation_test_cases.with_raw_response.retrieve(
"test_case_uuid",
)
@@ -91,7 +91,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_retrieve(self, client: GradientAI) -> None:
- with client.regions.evaluation_test_cases.with_streaming_response.retrieve(
+ with client.agents.evaluation_test_cases.with_streaming_response.retrieve(
"test_case_uuid",
) as response:
assert not response.is_closed
@@ -106,14 +106,14 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
@parametrize
def test_path_params_retrieve(self, client: GradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"):
- client.regions.evaluation_test_cases.with_raw_response.retrieve(
+ client.agents.evaluation_test_cases.with_raw_response.retrieve(
"",
)
@pytest.mark.skip()
@parametrize
def test_method_update(self, client: GradientAI) -> None:
- evaluation_test_case = client.regions.evaluation_test_cases.update(
+ evaluation_test_case = client.agents.evaluation_test_cases.update(
path_test_case_uuid="test_case_uuid",
)
assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"])
@@ -121,7 +121,7 @@ def test_method_update(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_update_with_all_params(self, client: GradientAI) -> None:
- evaluation_test_case = client.regions.evaluation_test_cases.update(
+ evaluation_test_case = client.agents.evaluation_test_cases.update(
path_test_case_uuid="test_case_uuid",
dataset_uuid="dataset_uuid",
description="description",
@@ -139,7 +139,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_update(self, client: GradientAI) -> None:
- response = client.regions.evaluation_test_cases.with_raw_response.update(
+ response = client.agents.evaluation_test_cases.with_raw_response.update(
path_test_case_uuid="test_case_uuid",
)
@@ -151,7 +151,7 @@ def test_raw_response_update(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_update(self, client: GradientAI) -> None:
- with client.regions.evaluation_test_cases.with_streaming_response.update(
+ with client.agents.evaluation_test_cases.with_streaming_response.update(
path_test_case_uuid="test_case_uuid",
) as response:
assert not response.is_closed
@@ -166,20 +166,20 @@ def test_streaming_response_update(self, client: GradientAI) -> None:
@parametrize
def test_path_params_update(self, client: GradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_test_case_uuid` but received ''"):
- client.regions.evaluation_test_cases.with_raw_response.update(
+ client.agents.evaluation_test_cases.with_raw_response.update(
path_test_case_uuid="",
)
@pytest.mark.skip()
@parametrize
def test_method_list(self, client: GradientAI) -> None:
- evaluation_test_case = client.regions.evaluation_test_cases.list()
+ evaluation_test_case = client.agents.evaluation_test_cases.list()
assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"])
@pytest.mark.skip()
@parametrize
def test_raw_response_list(self, client: GradientAI) -> None:
- response = client.regions.evaluation_test_cases.with_raw_response.list()
+ response = client.agents.evaluation_test_cases.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -189,7 +189,7 @@ def test_raw_response_list(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_list(self, client: GradientAI) -> None:
- with client.regions.evaluation_test_cases.with_streaming_response.list() as response:
+ with client.agents.evaluation_test_cases.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -201,7 +201,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_list_evaluation_runs(self, client: GradientAI) -> None:
- evaluation_test_case = client.regions.evaluation_test_cases.list_evaluation_runs(
+ evaluation_test_case = client.agents.evaluation_test_cases.list_evaluation_runs(
evaluation_test_case_uuid="evaluation_test_case_uuid",
)
assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"])
@@ -209,7 +209,7 @@ def test_method_list_evaluation_runs(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_list_evaluation_runs_with_all_params(self, client: GradientAI) -> None:
- evaluation_test_case = client.regions.evaluation_test_cases.list_evaluation_runs(
+ evaluation_test_case = client.agents.evaluation_test_cases.list_evaluation_runs(
evaluation_test_case_uuid="evaluation_test_case_uuid",
evaluation_test_case_version=0,
)
@@ -218,7 +218,7 @@ def test_method_list_evaluation_runs_with_all_params(self, client: GradientAI) -
@pytest.mark.skip()
@parametrize
def test_raw_response_list_evaluation_runs(self, client: GradientAI) -> None:
- response = client.regions.evaluation_test_cases.with_raw_response.list_evaluation_runs(
+ response = client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs(
evaluation_test_case_uuid="evaluation_test_case_uuid",
)
@@ -230,7 +230,7 @@ def test_raw_response_list_evaluation_runs(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_list_evaluation_runs(self, client: GradientAI) -> None:
- with client.regions.evaluation_test_cases.with_streaming_response.list_evaluation_runs(
+ with client.agents.evaluation_test_cases.with_streaming_response.list_evaluation_runs(
evaluation_test_case_uuid="evaluation_test_case_uuid",
) as response:
assert not response.is_closed
@@ -247,7 +247,7 @@ def test_path_params_list_evaluation_runs(self, client: GradientAI) -> None:
with pytest.raises(
ValueError, match=r"Expected a non-empty value for `evaluation_test_case_uuid` but received ''"
):
- client.regions.evaluation_test_cases.with_raw_response.list_evaluation_runs(
+ client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs(
evaluation_test_case_uuid="",
)
@@ -260,13 +260,13 @@ class TestAsyncEvaluationTestCases:
@pytest.mark.skip()
@parametrize
async def test_method_create(self, async_client: AsyncGradientAI) -> None:
- evaluation_test_case = await async_client.regions.evaluation_test_cases.create()
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.create()
assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
- evaluation_test_case = await async_client.regions.evaluation_test_cases.create(
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.create(
dataset_uuid="dataset_uuid",
description="description",
metrics=["string"],
@@ -283,7 +283,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI
@pytest.mark.skip()
@parametrize
async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.regions.evaluation_test_cases.with_raw_response.create()
+ response = await async_client.agents.evaluation_test_cases.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -293,7 +293,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
- async with async_client.regions.evaluation_test_cases.with_streaming_response.create() as response:
+ async with async_client.agents.evaluation_test_cases.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -305,7 +305,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) ->
@pytest.mark.skip()
@parametrize
async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
- evaluation_test_case = await async_client.regions.evaluation_test_cases.retrieve(
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve(
"test_case_uuid",
)
assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])
@@ -313,7 +313,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.regions.evaluation_test_cases.with_raw_response.retrieve(
+ response = await async_client.agents.evaluation_test_cases.with_raw_response.retrieve(
"test_case_uuid",
)
@@ -325,7 +325,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non
@pytest.mark.skip()
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- async with async_client.regions.evaluation_test_cases.with_streaming_response.retrieve(
+ async with async_client.agents.evaluation_test_cases.with_streaming_response.retrieve(
"test_case_uuid",
) as response:
assert not response.is_closed
@@ -340,14 +340,14 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI)
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"):
- await async_client.regions.evaluation_test_cases.with_raw_response.retrieve(
+ await async_client.agents.evaluation_test_cases.with_raw_response.retrieve(
"",
)
@pytest.mark.skip()
@parametrize
async def test_method_update(self, async_client: AsyncGradientAI) -> None:
- evaluation_test_case = await async_client.regions.evaluation_test_cases.update(
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.update(
path_test_case_uuid="test_case_uuid",
)
assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"])
@@ -355,7 +355,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
- evaluation_test_case = await async_client.regions.evaluation_test_cases.update(
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.update(
path_test_case_uuid="test_case_uuid",
dataset_uuid="dataset_uuid",
description="description",
@@ -373,7 +373,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI
@pytest.mark.skip()
@parametrize
async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.regions.evaluation_test_cases.with_raw_response.update(
+ response = await async_client.agents.evaluation_test_cases.with_raw_response.update(
path_test_case_uuid="test_case_uuid",
)
@@ -385,7 +385,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
- async with async_client.regions.evaluation_test_cases.with_streaming_response.update(
+ async with async_client.agents.evaluation_test_cases.with_streaming_response.update(
path_test_case_uuid="test_case_uuid",
) as response:
assert not response.is_closed
@@ -400,20 +400,20 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) ->
@parametrize
async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_test_case_uuid` but received ''"):
- await async_client.regions.evaluation_test_cases.with_raw_response.update(
+ await async_client.agents.evaluation_test_cases.with_raw_response.update(
path_test_case_uuid="",
)
@pytest.mark.skip()
@parametrize
async def test_method_list(self, async_client: AsyncGradientAI) -> None:
- evaluation_test_case = await async_client.regions.evaluation_test_cases.list()
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.list()
assert_matches_type(EvaluationTestCaseListResponse, evaluation_test_case, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.regions.evaluation_test_cases.with_raw_response.list()
+ response = await async_client.agents.evaluation_test_cases.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -423,7 +423,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
- async with async_client.regions.evaluation_test_cases.with_streaming_response.list() as response:
+ async with async_client.agents.evaluation_test_cases.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -435,7 +435,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N
@pytest.mark.skip()
@parametrize
async def test_method_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None:
- evaluation_test_case = await async_client.regions.evaluation_test_cases.list_evaluation_runs(
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.list_evaluation_runs(
evaluation_test_case_uuid="evaluation_test_case_uuid",
)
assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"])
@@ -443,7 +443,7 @@ async def test_method_list_evaluation_runs(self, async_client: AsyncGradientAI)
@pytest.mark.skip()
@parametrize
async def test_method_list_evaluation_runs_with_all_params(self, async_client: AsyncGradientAI) -> None:
- evaluation_test_case = await async_client.regions.evaluation_test_cases.list_evaluation_runs(
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.list_evaluation_runs(
evaluation_test_case_uuid="evaluation_test_case_uuid",
evaluation_test_case_version=0,
)
@@ -452,7 +452,7 @@ async def test_method_list_evaluation_runs_with_all_params(self, async_client: A
@pytest.mark.skip()
@parametrize
async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.regions.evaluation_test_cases.with_raw_response.list_evaluation_runs(
+ response = await async_client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs(
evaluation_test_case_uuid="evaluation_test_case_uuid",
)
@@ -464,7 +464,7 @@ async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradie
@pytest.mark.skip()
@parametrize
async def test_streaming_response_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None:
- async with async_client.regions.evaluation_test_cases.with_streaming_response.list_evaluation_runs(
+ async with async_client.agents.evaluation_test_cases.with_streaming_response.list_evaluation_runs(
evaluation_test_case_uuid="evaluation_test_case_uuid",
) as response:
assert not response.is_closed
@@ -481,6 +481,6 @@ async def test_path_params_list_evaluation_runs(self, async_client: AsyncGradien
with pytest.raises(
ValueError, match=r"Expected a non-empty value for `evaluation_test_case_uuid` but received ''"
):
- await async_client.regions.evaluation_test_cases.with_raw_response.list_evaluation_runs(
+ await async_client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs(
evaluation_test_case_uuid="",
)
diff --git a/tests/api_resources/regions/__init__.py b/tests/api_resources/regions/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/regions/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/regions/evaluation_runs/__init__.py b/tests/api_resources/regions/evaluation_runs/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/regions/evaluation_runs/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/regions/evaluation_runs/test_results.py b/tests/api_resources/regions/evaluation_runs/test_results.py
deleted file mode 100644
index e4b906bd..00000000
--- a/tests/api_resources/regions/evaluation_runs/test_results.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.regions.evaluation_runs import ResultRetrieveResponse, ResultRetrievePromptResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestResults:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: GradientAI) -> None:
- result = client.regions.evaluation_runs.results.retrieve(
- "evaluation_run_uuid",
- )
- assert_matches_type(ResultRetrieveResponse, result, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: GradientAI) -> None:
- response = client.regions.evaluation_runs.results.with_raw_response.retrieve(
- "evaluation_run_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- result = response.parse()
- assert_matches_type(ResultRetrieveResponse, result, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: GradientAI) -> None:
- with client.regions.evaluation_runs.results.with_streaming_response.retrieve(
- "evaluation_run_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- result = response.parse()
- assert_matches_type(ResultRetrieveResponse, result, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
- client.regions.evaluation_runs.results.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_prompt(self, client: GradientAI) -> None:
- result = client.regions.evaluation_runs.results.retrieve_prompt(
- prompt_id=0,
- evaluation_run_uuid="evaluation_run_uuid",
- )
- assert_matches_type(ResultRetrievePromptResponse, result, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve_prompt(self, client: GradientAI) -> None:
- response = client.regions.evaluation_runs.results.with_raw_response.retrieve_prompt(
- prompt_id=0,
- evaluation_run_uuid="evaluation_run_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- result = response.parse()
- assert_matches_type(ResultRetrievePromptResponse, result, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve_prompt(self, client: GradientAI) -> None:
- with client.regions.evaluation_runs.results.with_streaming_response.retrieve_prompt(
- prompt_id=0,
- evaluation_run_uuid="evaluation_run_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- result = response.parse()
- assert_matches_type(ResultRetrievePromptResponse, result, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve_prompt(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
- client.regions.evaluation_runs.results.with_raw_response.retrieve_prompt(
- prompt_id=0,
- evaluation_run_uuid="",
- )
-
-
-class TestAsyncResults:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
- result = await async_client.regions.evaluation_runs.results.retrieve(
- "evaluation_run_uuid",
- )
- assert_matches_type(ResultRetrieveResponse, result, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.regions.evaluation_runs.results.with_raw_response.retrieve(
- "evaluation_run_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- result = await response.parse()
- assert_matches_type(ResultRetrieveResponse, result, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- async with async_client.regions.evaluation_runs.results.with_streaming_response.retrieve(
- "evaluation_run_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- result = await response.parse()
- assert_matches_type(ResultRetrieveResponse, result, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
- await async_client.regions.evaluation_runs.results.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_prompt(self, async_client: AsyncGradientAI) -> None:
- result = await async_client.regions.evaluation_runs.results.retrieve_prompt(
- prompt_id=0,
- evaluation_run_uuid="evaluation_run_uuid",
- )
- assert_matches_type(ResultRetrievePromptResponse, result, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve_prompt(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.regions.evaluation_runs.results.with_raw_response.retrieve_prompt(
- prompt_id=0,
- evaluation_run_uuid="evaluation_run_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- result = await response.parse()
- assert_matches_type(ResultRetrievePromptResponse, result, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve_prompt(self, async_client: AsyncGradientAI) -> None:
- async with async_client.regions.evaluation_runs.results.with_streaming_response.retrieve_prompt(
- prompt_id=0,
- evaluation_run_uuid="evaluation_run_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- result = await response.parse()
- assert_matches_type(ResultRetrievePromptResponse, result, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve_prompt(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
- await async_client.regions.evaluation_runs.results.with_raw_response.retrieve_prompt(
- prompt_id=0,
- evaluation_run_uuid="",
- )
diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py
index bf51ef96..4ed5bb27 100644
--- a/tests/api_resources/test_regions.py
+++ b/tests/api_resources/test_regions.py
@@ -9,7 +9,7 @@
from tests.utils import assert_matches_type
from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types import RegionListResponse, RegionListEvaluationMetricsResponse
+from do_gradientai.types import RegionListResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -54,34 +54,6 @@ def test_streaming_response_list(self, client: GradientAI) -> None:
assert cast(Any, response.is_closed) is True
- @pytest.mark.skip()
- @parametrize
- def test_method_list_evaluation_metrics(self, client: GradientAI) -> None:
- region = client.regions.list_evaluation_metrics()
- assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list_evaluation_metrics(self, client: GradientAI) -> None:
- response = client.regions.with_raw_response.list_evaluation_metrics()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- region = response.parse()
- assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list_evaluation_metrics(self, client: GradientAI) -> None:
- with client.regions.with_streaming_response.list_evaluation_metrics() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- region = response.parse()
- assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
class TestAsyncRegions:
parametrize = pytest.mark.parametrize(
@@ -124,31 +96,3 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N
assert_matches_type(RegionListResponse, region, path=["response"])
assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_evaluation_metrics(self, async_client: AsyncGradientAI) -> None:
- region = await async_client.regions.list_evaluation_metrics()
- assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list_evaluation_metrics(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.regions.with_raw_response.list_evaluation_metrics()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- region = await response.parse()
- assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list_evaluation_metrics(self, async_client: AsyncGradientAI) -> None:
- async with async_client.regions.with_streaming_response.list_evaluation_metrics() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- region = await response.parse()
- assert_matches_type(RegionListEvaluationMetricsResponse, region, path=["response"])
-
- assert cast(Any, response.is_closed) is True
From 1c702b340e4fd723393c0f02df2a87d03ca8c9bb Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 03:21:34 +0000
Subject: [PATCH 05/21] feat(api): update via SDK Studio
---
.stats.yml | 4 +-
api.md | 13 -
.../resources/inference/__init__.py | 14 --
.../resources/inference/inference.py | 32 ---
.../resources/inference/models.py | 226 ------------------
src/do_gradientai/types/inference/__init__.py | 2 -
src/do_gradientai/types/inference/model.py | 21 --
.../types/inference/model_list_response.py | 15 --
tests/api_resources/inference/test_models.py | 164 -------------
9 files changed, 2 insertions(+), 489 deletions(-)
delete mode 100644 src/do_gradientai/resources/inference/models.py
delete mode 100644 src/do_gradientai/types/inference/model.py
delete mode 100644 src/do_gradientai/types/inference/model_list_response.py
delete mode 100644 tests/api_resources/inference/test_models.py
diff --git a/.stats.yml b/.stats.yml
index 49720dd2..cb95a5af 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 69
+configured_endpoints: 67
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: 190bed33fe275347e4871077b32af63f
+config_hash: 886acf2e0eda98b9a718598587f7f81e
diff --git a/api.md b/api.md
index 018742d7..a7297098 100644
--- a/api.md
+++ b/api.md
@@ -365,19 +365,6 @@ Methods:
- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse
- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse
-## Models
-
-Types:
-
-```python
-from do_gradientai.types.inference import Model, ModelListResponse
-```
-
-Methods:
-
-- client.inference.models.retrieve(model) -> Model
-- client.inference.models.list() -> ModelListResponse
-
# Models
Types:
diff --git a/src/do_gradientai/resources/inference/__init__.py b/src/do_gradientai/resources/inference/__init__.py
index 0e5631ce..21798ab2 100644
--- a/src/do_gradientai/resources/inference/__init__.py
+++ b/src/do_gradientai/resources/inference/__init__.py
@@ -1,13 +1,5 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from .models import (
- ModelsResource,
- AsyncModelsResource,
- ModelsResourceWithRawResponse,
- AsyncModelsResourceWithRawResponse,
- ModelsResourceWithStreamingResponse,
- AsyncModelsResourceWithStreamingResponse,
-)
from .api_keys import (
APIKeysResource,
AsyncAPIKeysResource,
@@ -32,12 +24,6 @@
"AsyncAPIKeysResourceWithRawResponse",
"APIKeysResourceWithStreamingResponse",
"AsyncAPIKeysResourceWithStreamingResponse",
- "ModelsResource",
- "AsyncModelsResource",
- "ModelsResourceWithRawResponse",
- "AsyncModelsResourceWithRawResponse",
- "ModelsResourceWithStreamingResponse",
- "AsyncModelsResourceWithStreamingResponse",
"InferenceResource",
"AsyncInferenceResource",
"InferenceResourceWithRawResponse",
diff --git a/src/do_gradientai/resources/inference/inference.py b/src/do_gradientai/resources/inference/inference.py
index 209d6f17..a144bae0 100644
--- a/src/do_gradientai/resources/inference/inference.py
+++ b/src/do_gradientai/resources/inference/inference.py
@@ -2,14 +2,6 @@
from __future__ import annotations
-from .models import (
- ModelsResource,
- AsyncModelsResource,
- ModelsResourceWithRawResponse,
- AsyncModelsResourceWithRawResponse,
- ModelsResourceWithStreamingResponse,
- AsyncModelsResourceWithStreamingResponse,
-)
from .api_keys import (
APIKeysResource,
AsyncAPIKeysResource,
@@ -29,10 +21,6 @@ class InferenceResource(SyncAPIResource):
def api_keys(self) -> APIKeysResource:
return APIKeysResource(self._client)
- @cached_property
- def models(self) -> ModelsResource:
- return ModelsResource(self._client)
-
@cached_property
def with_raw_response(self) -> InferenceResourceWithRawResponse:
"""
@@ -58,10 +46,6 @@ class AsyncInferenceResource(AsyncAPIResource):
def api_keys(self) -> AsyncAPIKeysResource:
return AsyncAPIKeysResource(self._client)
- @cached_property
- def models(self) -> AsyncModelsResource:
- return AsyncModelsResource(self._client)
-
@cached_property
def with_raw_response(self) -> AsyncInferenceResourceWithRawResponse:
"""
@@ -90,10 +74,6 @@ def __init__(self, inference: InferenceResource) -> None:
def api_keys(self) -> APIKeysResourceWithRawResponse:
return APIKeysResourceWithRawResponse(self._inference.api_keys)
- @cached_property
- def models(self) -> ModelsResourceWithRawResponse:
- return ModelsResourceWithRawResponse(self._inference.models)
-
class AsyncInferenceResourceWithRawResponse:
def __init__(self, inference: AsyncInferenceResource) -> None:
@@ -103,10 +83,6 @@ def __init__(self, inference: AsyncInferenceResource) -> None:
def api_keys(self) -> AsyncAPIKeysResourceWithRawResponse:
return AsyncAPIKeysResourceWithRawResponse(self._inference.api_keys)
- @cached_property
- def models(self) -> AsyncModelsResourceWithRawResponse:
- return AsyncModelsResourceWithRawResponse(self._inference.models)
-
class InferenceResourceWithStreamingResponse:
def __init__(self, inference: InferenceResource) -> None:
@@ -116,10 +92,6 @@ def __init__(self, inference: InferenceResource) -> None:
def api_keys(self) -> APIKeysResourceWithStreamingResponse:
return APIKeysResourceWithStreamingResponse(self._inference.api_keys)
- @cached_property
- def models(self) -> ModelsResourceWithStreamingResponse:
- return ModelsResourceWithStreamingResponse(self._inference.models)
-
class AsyncInferenceResourceWithStreamingResponse:
def __init__(self, inference: AsyncInferenceResource) -> None:
@@ -128,7 +100,3 @@ def __init__(self, inference: AsyncInferenceResource) -> None:
@cached_property
def api_keys(self) -> AsyncAPIKeysResourceWithStreamingResponse:
return AsyncAPIKeysResourceWithStreamingResponse(self._inference.api_keys)
-
- @cached_property
- def models(self) -> AsyncModelsResourceWithStreamingResponse:
- return AsyncModelsResourceWithStreamingResponse(self._inference.models)
diff --git a/src/do_gradientai/resources/inference/models.py b/src/do_gradientai/resources/inference/models.py
deleted file mode 100644
index 42e1dcb2..00000000
--- a/src/do_gradientai/resources/inference/models.py
+++ /dev/null
@@ -1,226 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ..._base_client import make_request_options
-from ...types.inference.model import Model
-from ...types.inference.model_list_response import ModelListResponse
-
-__all__ = ["ModelsResource", "AsyncModelsResource"]
-
-
-class ModelsResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> ModelsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return ModelsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ModelsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return ModelsResourceWithStreamingResponse(self)
-
- def retrieve(
- self,
- model: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Model:
- """
- Retrieves a model instance, providing basic information about the model such as
- the owner and permissioning.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not model:
- raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
- return self._get(
- f"/models/{model}"
- if self._client._base_url_overridden
- else f"https://inference.do-ai.run/v1/models/{model}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Model,
- )
-
- def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModelListResponse:
- """
- Lists the currently available models, and provides basic information about each
- one such as the owner and availability.
- """
- return self._get(
- "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ModelListResponse,
- )
-
-
-class AsyncModelsResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncModelsResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return AsyncModelsResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return AsyncModelsResourceWithStreamingResponse(self)
-
- async def retrieve(
- self,
- model: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> Model:
- """
- Retrieves a model instance, providing basic information about the model such as
- the owner and permissioning.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not model:
- raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
- return await self._get(
- f"/models/{model}"
- if self._client._base_url_overridden
- else f"https://inference.do-ai.run/v1/models/{model}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=Model,
- )
-
- async def list(
- self,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModelListResponse:
- """
- Lists the currently available models, and provides basic information about each
- one such as the owner and availability.
- """
- return await self._get(
- "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=ModelListResponse,
- )
-
-
-class ModelsResourceWithRawResponse:
- def __init__(self, models: ModelsResource) -> None:
- self._models = models
-
- self.retrieve = to_raw_response_wrapper(
- models.retrieve,
- )
- self.list = to_raw_response_wrapper(
- models.list,
- )
-
-
-class AsyncModelsResourceWithRawResponse:
- def __init__(self, models: AsyncModelsResource) -> None:
- self._models = models
-
- self.retrieve = async_to_raw_response_wrapper(
- models.retrieve,
- )
- self.list = async_to_raw_response_wrapper(
- models.list,
- )
-
-
-class ModelsResourceWithStreamingResponse:
- def __init__(self, models: ModelsResource) -> None:
- self._models = models
-
- self.retrieve = to_streamed_response_wrapper(
- models.retrieve,
- )
- self.list = to_streamed_response_wrapper(
- models.list,
- )
-
-
-class AsyncModelsResourceWithStreamingResponse:
- def __init__(self, models: AsyncModelsResource) -> None:
- self._models = models
-
- self.retrieve = async_to_streamed_response_wrapper(
- models.retrieve,
- )
- self.list = async_to_streamed_response_wrapper(
- models.list,
- )
diff --git a/src/do_gradientai/types/inference/__init__.py b/src/do_gradientai/types/inference/__init__.py
index 829340d7..c3cbcd6d 100644
--- a/src/do_gradientai/types/inference/__init__.py
+++ b/src/do_gradientai/types/inference/__init__.py
@@ -2,9 +2,7 @@
from __future__ import annotations
-from .model import Model as Model
from .api_key_list_params import APIKeyListParams as APIKeyListParams
-from .model_list_response import ModelListResponse as ModelListResponse
from .api_key_create_params import APIKeyCreateParams as APIKeyCreateParams
from .api_key_list_response import APIKeyListResponse as APIKeyListResponse
from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams
diff --git a/src/do_gradientai/types/inference/model.py b/src/do_gradientai/types/inference/model.py
deleted file mode 100644
index ed8843e8..00000000
--- a/src/do_gradientai/types/inference/model.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing_extensions import Literal
-
-from ..._models import BaseModel
-
-__all__ = ["Model"]
-
-
-class Model(BaseModel):
- id: str
- """The model identifier, which can be referenced in the API endpoints."""
-
- created: int
- """The Unix timestamp (in seconds) when the model was created."""
-
- object: Literal["model"]
- """The object type, which is always "model"."""
-
- owned_by: str
- """The organization that owns the model."""
diff --git a/src/do_gradientai/types/inference/model_list_response.py b/src/do_gradientai/types/inference/model_list_response.py
deleted file mode 100644
index 01bf3b62..00000000
--- a/src/do_gradientai/types/inference/model_list_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List
-from typing_extensions import Literal
-
-from .model import Model
-from ..._models import BaseModel
-
-__all__ = ["ModelListResponse"]
-
-
-class ModelListResponse(BaseModel):
- data: List[Model]
-
- object: Literal["list"]
diff --git a/tests/api_resources/inference/test_models.py b/tests/api_resources/inference/test_models.py
deleted file mode 100644
index e930d83f..00000000
--- a/tests/api_resources/inference/test_models.py
+++ /dev/null
@@ -1,164 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.inference import Model, ModelListResponse
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestModels:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: GradientAI) -> None:
- model = client.inference.models.retrieve(
- "llama3-8b-instruct",
- )
- assert_matches_type(Model, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: GradientAI) -> None:
- response = client.inference.models.with_raw_response.retrieve(
- "llama3-8b-instruct",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- model = response.parse()
- assert_matches_type(Model, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: GradientAI) -> None:
- with client.inference.models.with_streaming_response.retrieve(
- "llama3-8b-instruct",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- model = response.parse()
- assert_matches_type(Model, model, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"):
- client.inference.models.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: GradientAI) -> None:
- model = client.inference.models.list()
- assert_matches_type(ModelListResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: GradientAI) -> None:
- response = client.inference.models.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- model = response.parse()
- assert_matches_type(ModelListResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: GradientAI) -> None:
- with client.inference.models.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- model = response.parse()
- assert_matches_type(ModelListResponse, model, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncModels:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
- model = await async_client.inference.models.retrieve(
- "llama3-8b-instruct",
- )
- assert_matches_type(Model, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.inference.models.with_raw_response.retrieve(
- "llama3-8b-instruct",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- model = await response.parse()
- assert_matches_type(Model, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- async with async_client.inference.models.with_streaming_response.retrieve(
- "llama3-8b-instruct",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- model = await response.parse()
- assert_matches_type(Model, model, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"):
- await async_client.inference.models.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncGradientAI) -> None:
- model = await async_client.inference.models.list()
- assert_matches_type(ModelListResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.inference.models.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- model = await response.parse()
- assert_matches_type(ModelListResponse, model, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
- async with async_client.inference.models.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- model = await response.parse()
- assert_matches_type(ModelListResponse, model, path=["response"])
-
- assert cast(Any, response.is_closed) is True
From 7e5029e223d7f6655ac2bec2fa26d13d46969170 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 03:21:55 +0000
Subject: [PATCH 06/21] codegen metadata
---
.stats.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.stats.yml b/.stats.yml
index cb95a5af..e9d82b51 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 67
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: 886acf2e0eda98b9a718598587f7f81e
+config_hash: e178baf496088c521dd245cbc46c932a
From 1daa3f55a49b5411d1b378fce30aea3ccbccb6d7 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 03:22:33 +0000
Subject: [PATCH 07/21] feat(api): update via SDK Studio
---
.stats.yml | 4 +-
api.md | 56 +-
src/do_gradientai/_client.py | 41 +-
src/do_gradientai/resources/__init__.py | 14 -
.../resources/models/__init__.py | 19 +
.../resources/{ => models}/models.py | 16 +-
.../resources/providers/__init__.py | 47 --
.../resources/providers/anthropic/__init__.py | 33 -
.../providers/anthropic/anthropic.py | 102 ---
.../resources/providers/anthropic/keys.py | 686 ------------------
.../resources/providers/openai/__init__.py | 33 -
.../resources/providers/openai/keys.py | 682 -----------------
.../resources/providers/openai/openai.py | 102 ---
.../resources/providers/providers.py | 134 ----
.../types/{providers => models}/__init__.py | 0
.../types/providers/anthropic/__init__.py | 14 -
.../providers/anthropic/key_create_params.py | 13 -
.../anthropic/key_create_response.py | 12 -
.../anthropic/key_delete_response.py | 12 -
.../anthropic/key_list_agents_params.py | 15 -
.../anthropic/key_list_agents_response.py | 22 -
.../providers/anthropic/key_list_params.py | 15 -
.../providers/anthropic/key_list_response.py | 18 -
.../anthropic/key_retrieve_response.py | 12 -
.../providers/anthropic/key_update_params.py | 17 -
.../anthropic/key_update_response.py | 12 -
.../types/providers/openai/__init__.py | 14 -
.../providers/openai/key_create_params.py | 13 -
.../providers/openai/key_create_response.py | 12 -
.../providers/openai/key_delete_response.py | 12 -
.../types/providers/openai/key_list_params.py | 15 -
.../providers/openai/key_list_response.py | 18 -
.../openai/key_retrieve_agents_params.py | 15 -
.../openai/key_retrieve_agents_response.py | 22 -
.../providers/openai/key_retrieve_response.py | 12 -
.../providers/openai/key_update_params.py | 17 -
.../providers/openai/key_update_response.py | 12 -
.../{providers => models}/__init__.py | 0
.../providers/anthropic/__init__.py | 1 -
.../providers/anthropic/test_keys.py | 557 --------------
.../providers/openai/__init__.py | 1 -
.../providers/openai/test_keys.py | 557 --------------
42 files changed, 32 insertions(+), 3377 deletions(-)
create mode 100644 src/do_gradientai/resources/models/__init__.py
rename src/do_gradientai/resources/{ => models}/models.py (95%)
delete mode 100644 src/do_gradientai/resources/providers/__init__.py
delete mode 100644 src/do_gradientai/resources/providers/anthropic/__init__.py
delete mode 100644 src/do_gradientai/resources/providers/anthropic/anthropic.py
delete mode 100644 src/do_gradientai/resources/providers/anthropic/keys.py
delete mode 100644 src/do_gradientai/resources/providers/openai/__init__.py
delete mode 100644 src/do_gradientai/resources/providers/openai/keys.py
delete mode 100644 src/do_gradientai/resources/providers/openai/openai.py
delete mode 100644 src/do_gradientai/resources/providers/providers.py
rename src/do_gradientai/types/{providers => models}/__init__.py (100%)
delete mode 100644 src/do_gradientai/types/providers/anthropic/__init__.py
delete mode 100644 src/do_gradientai/types/providers/anthropic/key_create_params.py
delete mode 100644 src/do_gradientai/types/providers/anthropic/key_create_response.py
delete mode 100644 src/do_gradientai/types/providers/anthropic/key_delete_response.py
delete mode 100644 src/do_gradientai/types/providers/anthropic/key_list_agents_params.py
delete mode 100644 src/do_gradientai/types/providers/anthropic/key_list_agents_response.py
delete mode 100644 src/do_gradientai/types/providers/anthropic/key_list_params.py
delete mode 100644 src/do_gradientai/types/providers/anthropic/key_list_response.py
delete mode 100644 src/do_gradientai/types/providers/anthropic/key_retrieve_response.py
delete mode 100644 src/do_gradientai/types/providers/anthropic/key_update_params.py
delete mode 100644 src/do_gradientai/types/providers/anthropic/key_update_response.py
delete mode 100644 src/do_gradientai/types/providers/openai/__init__.py
delete mode 100644 src/do_gradientai/types/providers/openai/key_create_params.py
delete mode 100644 src/do_gradientai/types/providers/openai/key_create_response.py
delete mode 100644 src/do_gradientai/types/providers/openai/key_delete_response.py
delete mode 100644 src/do_gradientai/types/providers/openai/key_list_params.py
delete mode 100644 src/do_gradientai/types/providers/openai/key_list_response.py
delete mode 100644 src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py
delete mode 100644 src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py
delete mode 100644 src/do_gradientai/types/providers/openai/key_retrieve_response.py
delete mode 100644 src/do_gradientai/types/providers/openai/key_update_params.py
delete mode 100644 src/do_gradientai/types/providers/openai/key_update_response.py
rename tests/api_resources/{providers => models}/__init__.py (100%)
delete mode 100644 tests/api_resources/providers/anthropic/__init__.py
delete mode 100644 tests/api_resources/providers/anthropic/test_keys.py
delete mode 100644 tests/api_resources/providers/openai/__init__.py
delete mode 100644 tests/api_resources/providers/openai/test_keys.py
diff --git a/.stats.yml b/.stats.yml
index e9d82b51..645d4148 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 67
+configured_endpoints: 55
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: e178baf496088c521dd245cbc46c932a
+config_hash: ed552d382f42c2e579a4bb0a608e2055
diff --git a/api.md b/api.md
index a7297098..9811559a 100644
--- a/api.md
+++ b/api.md
@@ -191,60 +191,6 @@ Methods:
- client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse
- client.agents.child_agents.view(uuid) -> ChildAgentViewResponse
-# Providers
-
-## Anthropic
-
-### Keys
-
-Types:
-
-```python
-from do_gradientai.types.providers.anthropic import (
- KeyCreateResponse,
- KeyRetrieveResponse,
- KeyUpdateResponse,
- KeyListResponse,
- KeyDeleteResponse,
- KeyListAgentsResponse,
-)
-```
-
-Methods:
-
-- client.providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse
-- client.providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
-- client.providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
-- client.providers.anthropic.keys.list(\*\*params) -> KeyListResponse
-- client.providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse
-- client.providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
-
-## OpenAI
-
-### Keys
-
-Types:
-
-```python
-from do_gradientai.types.providers.openai import (
- KeyCreateResponse,
- KeyRetrieveResponse,
- KeyUpdateResponse,
- KeyListResponse,
- KeyDeleteResponse,
- KeyRetrieveAgentsResponse,
-)
-```
-
-Methods:
-
-- client.providers.openai.keys.create(\*\*params) -> KeyCreateResponse
-- client.providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
-- client.providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
-- client.providers.openai.keys.list(\*\*params) -> KeyListResponse
-- client.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse
-- client.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse
-
# Regions
Types:
@@ -375,4 +321,4 @@ from do_gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelLi
Methods:
-- client.models.list(\*\*params) -> ModelListResponse
+- client.models.list(\*\*params) -> ModelListResponse
diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py
index 8710fe68..afd18a26 100644
--- a/src/do_gradientai/_client.py
+++ b/src/do_gradientai/_client.py
@@ -31,14 +31,13 @@
)
if TYPE_CHECKING:
- from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases
- from .resources.models import ModelsResource, AsyncModelsResource
+ from .resources import chat, agents, models, regions, inference, indexing_jobs, knowledge_bases
from .resources.regions import RegionsResource, AsyncRegionsResource
from .resources.chat.chat import ChatResource, AsyncChatResource
from .resources.agents.agents import AgentsResource, AsyncAgentsResource
from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource
+ from .resources.models.models import ModelsResource, AsyncModelsResource
from .resources.inference.inference import InferenceResource, AsyncInferenceResource
- from .resources.providers.providers import ProvidersResource, AsyncProvidersResource
from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource
__all__ = [
@@ -115,12 +114,6 @@ def agents(self) -> AgentsResource:
return AgentsResource(self)
- @cached_property
- def providers(self) -> ProvidersResource:
- from .resources.providers import ProvidersResource
-
- return ProvidersResource(self)
-
@cached_property
def regions(self) -> RegionsResource:
from .resources.regions import RegionsResource
@@ -334,12 +327,6 @@ def agents(self) -> AsyncAgentsResource:
return AsyncAgentsResource(self)
- @cached_property
- def providers(self) -> AsyncProvidersResource:
- from .resources.providers import AsyncProvidersResource
-
- return AsyncProvidersResource(self)
-
@cached_property
def regions(self) -> AsyncRegionsResource:
from .resources.regions import AsyncRegionsResource
@@ -503,12 +490,6 @@ def agents(self) -> agents.AgentsResourceWithRawResponse:
return AgentsResourceWithRawResponse(self._client.agents)
- @cached_property
- def providers(self) -> providers.ProvidersResourceWithRawResponse:
- from .resources.providers import ProvidersResourceWithRawResponse
-
- return ProvidersResourceWithRawResponse(self._client.providers)
-
@cached_property
def regions(self) -> regions.RegionsResourceWithRawResponse:
from .resources.regions import RegionsResourceWithRawResponse
@@ -558,12 +539,6 @@ def agents(self) -> agents.AsyncAgentsResourceWithRawResponse:
return AsyncAgentsResourceWithRawResponse(self._client.agents)
- @cached_property
- def providers(self) -> providers.AsyncProvidersResourceWithRawResponse:
- from .resources.providers import AsyncProvidersResourceWithRawResponse
-
- return AsyncProvidersResourceWithRawResponse(self._client.providers)
-
@cached_property
def regions(self) -> regions.AsyncRegionsResourceWithRawResponse:
from .resources.regions import AsyncRegionsResourceWithRawResponse
@@ -613,12 +588,6 @@ def agents(self) -> agents.AgentsResourceWithStreamingResponse:
return AgentsResourceWithStreamingResponse(self._client.agents)
- @cached_property
- def providers(self) -> providers.ProvidersResourceWithStreamingResponse:
- from .resources.providers import ProvidersResourceWithStreamingResponse
-
- return ProvidersResourceWithStreamingResponse(self._client.providers)
-
@cached_property
def regions(self) -> regions.RegionsResourceWithStreamingResponse:
from .resources.regions import RegionsResourceWithStreamingResponse
@@ -668,12 +637,6 @@ def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse:
return AsyncAgentsResourceWithStreamingResponse(self._client.agents)
- @cached_property
- def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse:
- from .resources.providers import AsyncProvidersResourceWithStreamingResponse
-
- return AsyncProvidersResourceWithStreamingResponse(self._client.providers)
-
@cached_property
def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse:
from .resources.regions import AsyncRegionsResourceWithStreamingResponse
diff --git a/src/do_gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py
index 1763a13e..b074f7d1 100644
--- a/src/do_gradientai/resources/__init__.py
+++ b/src/do_gradientai/resources/__init__.py
@@ -40,14 +40,6 @@
InferenceResourceWithStreamingResponse,
AsyncInferenceResourceWithStreamingResponse,
)
-from .providers import (
- ProvidersResource,
- AsyncProvidersResource,
- ProvidersResourceWithRawResponse,
- AsyncProvidersResourceWithRawResponse,
- ProvidersResourceWithStreamingResponse,
- AsyncProvidersResourceWithStreamingResponse,
-)
from .indexing_jobs import (
IndexingJobsResource,
AsyncIndexingJobsResource,
@@ -72,12 +64,6 @@
"AsyncAgentsResourceWithRawResponse",
"AgentsResourceWithStreamingResponse",
"AsyncAgentsResourceWithStreamingResponse",
- "ProvidersResource",
- "AsyncProvidersResource",
- "ProvidersResourceWithRawResponse",
- "AsyncProvidersResourceWithRawResponse",
- "ProvidersResourceWithStreamingResponse",
- "AsyncProvidersResourceWithStreamingResponse",
"RegionsResource",
"AsyncRegionsResource",
"RegionsResourceWithRawResponse",
diff --git a/src/do_gradientai/resources/models/__init__.py b/src/do_gradientai/resources/models/__init__.py
new file mode 100644
index 00000000..7a5c25cc
--- /dev/null
+++ b/src/do_gradientai/resources/models/__init__.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .models import (
+ ModelsResource,
+ AsyncModelsResource,
+ ModelsResourceWithRawResponse,
+ AsyncModelsResourceWithRawResponse,
+ ModelsResourceWithStreamingResponse,
+ AsyncModelsResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "ModelsResource",
+ "AsyncModelsResource",
+ "ModelsResourceWithRawResponse",
+ "AsyncModelsResourceWithRawResponse",
+ "ModelsResourceWithStreamingResponse",
+ "AsyncModelsResourceWithStreamingResponse",
+]
diff --git a/src/do_gradientai/resources/models.py b/src/do_gradientai/resources/models/models.py
similarity index 95%
rename from src/do_gradientai/resources/models.py
rename to src/do_gradientai/resources/models/models.py
index c8e78b9b..acdd45a1 100644
--- a/src/do_gradientai/resources/models.py
+++ b/src/do_gradientai/resources/models/models.py
@@ -7,19 +7,19 @@
import httpx
-from ..types import model_list_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
+from ...types import model_list_params
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
to_raw_response_wrapper,
to_streamed_response_wrapper,
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
-from .._base_client import make_request_options
-from ..types.model_list_response import ModelListResponse
+from ..._base_client import make_request_options
+from ...types.model_list_response import ModelListResponse
__all__ = ["ModelsResource", "AsyncModelsResource"]
diff --git a/src/do_gradientai/resources/providers/__init__.py b/src/do_gradientai/resources/providers/__init__.py
deleted file mode 100644
index 1731e057..00000000
--- a/src/do_gradientai/resources/providers/__init__.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .openai import (
- OpenAIResource,
- AsyncOpenAIResource,
- OpenAIResourceWithRawResponse,
- AsyncOpenAIResourceWithRawResponse,
- OpenAIResourceWithStreamingResponse,
- AsyncOpenAIResourceWithStreamingResponse,
-)
-from .anthropic import (
- AnthropicResource,
- AsyncAnthropicResource,
- AnthropicResourceWithRawResponse,
- AsyncAnthropicResourceWithRawResponse,
- AnthropicResourceWithStreamingResponse,
- AsyncAnthropicResourceWithStreamingResponse,
-)
-from .providers import (
- ProvidersResource,
- AsyncProvidersResource,
- ProvidersResourceWithRawResponse,
- AsyncProvidersResourceWithRawResponse,
- ProvidersResourceWithStreamingResponse,
- AsyncProvidersResourceWithStreamingResponse,
-)
-
-__all__ = [
- "AnthropicResource",
- "AsyncAnthropicResource",
- "AnthropicResourceWithRawResponse",
- "AsyncAnthropicResourceWithRawResponse",
- "AnthropicResourceWithStreamingResponse",
- "AsyncAnthropicResourceWithStreamingResponse",
- "OpenAIResource",
- "AsyncOpenAIResource",
- "OpenAIResourceWithRawResponse",
- "AsyncOpenAIResourceWithRawResponse",
- "OpenAIResourceWithStreamingResponse",
- "AsyncOpenAIResourceWithStreamingResponse",
- "ProvidersResource",
- "AsyncProvidersResource",
- "ProvidersResourceWithRawResponse",
- "AsyncProvidersResourceWithRawResponse",
- "ProvidersResourceWithStreamingResponse",
- "AsyncProvidersResourceWithStreamingResponse",
-]
diff --git a/src/do_gradientai/resources/providers/anthropic/__init__.py b/src/do_gradientai/resources/providers/anthropic/__init__.py
deleted file mode 100644
index 057a3a2f..00000000
--- a/src/do_gradientai/resources/providers/anthropic/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .keys import (
- KeysResource,
- AsyncKeysResource,
- KeysResourceWithRawResponse,
- AsyncKeysResourceWithRawResponse,
- KeysResourceWithStreamingResponse,
- AsyncKeysResourceWithStreamingResponse,
-)
-from .anthropic import (
- AnthropicResource,
- AsyncAnthropicResource,
- AnthropicResourceWithRawResponse,
- AsyncAnthropicResourceWithRawResponse,
- AnthropicResourceWithStreamingResponse,
- AsyncAnthropicResourceWithStreamingResponse,
-)
-
-__all__ = [
- "KeysResource",
- "AsyncKeysResource",
- "KeysResourceWithRawResponse",
- "AsyncKeysResourceWithRawResponse",
- "KeysResourceWithStreamingResponse",
- "AsyncKeysResourceWithStreamingResponse",
- "AnthropicResource",
- "AsyncAnthropicResource",
- "AnthropicResourceWithRawResponse",
- "AsyncAnthropicResourceWithRawResponse",
- "AnthropicResourceWithStreamingResponse",
- "AsyncAnthropicResourceWithStreamingResponse",
-]
diff --git a/src/do_gradientai/resources/providers/anthropic/anthropic.py b/src/do_gradientai/resources/providers/anthropic/anthropic.py
deleted file mode 100644
index 23a914e9..00000000
--- a/src/do_gradientai/resources/providers/anthropic/anthropic.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .keys import (
- KeysResource,
- AsyncKeysResource,
- KeysResourceWithRawResponse,
- AsyncKeysResourceWithRawResponse,
- KeysResourceWithStreamingResponse,
- AsyncKeysResourceWithStreamingResponse,
-)
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-
-__all__ = ["AnthropicResource", "AsyncAnthropicResource"]
-
-
-class AnthropicResource(SyncAPIResource):
- @cached_property
- def keys(self) -> KeysResource:
- return KeysResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AnthropicResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return AnthropicResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return AnthropicResourceWithStreamingResponse(self)
-
-
-class AsyncAnthropicResource(AsyncAPIResource):
- @cached_property
- def keys(self) -> AsyncKeysResource:
- return AsyncKeysResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return AsyncAnthropicResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return AsyncAnthropicResourceWithStreamingResponse(self)
-
-
-class AnthropicResourceWithRawResponse:
- def __init__(self, anthropic: AnthropicResource) -> None:
- self._anthropic = anthropic
-
- @cached_property
- def keys(self) -> KeysResourceWithRawResponse:
- return KeysResourceWithRawResponse(self._anthropic.keys)
-
-
-class AsyncAnthropicResourceWithRawResponse:
- def __init__(self, anthropic: AsyncAnthropicResource) -> None:
- self._anthropic = anthropic
-
- @cached_property
- def keys(self) -> AsyncKeysResourceWithRawResponse:
- return AsyncKeysResourceWithRawResponse(self._anthropic.keys)
-
-
-class AnthropicResourceWithStreamingResponse:
- def __init__(self, anthropic: AnthropicResource) -> None:
- self._anthropic = anthropic
-
- @cached_property
- def keys(self) -> KeysResourceWithStreamingResponse:
- return KeysResourceWithStreamingResponse(self._anthropic.keys)
-
-
-class AsyncAnthropicResourceWithStreamingResponse:
- def __init__(self, anthropic: AsyncAnthropicResource) -> None:
- self._anthropic = anthropic
-
- @cached_property
- def keys(self) -> AsyncKeysResourceWithStreamingResponse:
- return AsyncKeysResourceWithStreamingResponse(self._anthropic.keys)
diff --git a/src/do_gradientai/resources/providers/anthropic/keys.py b/src/do_gradientai/resources/providers/anthropic/keys.py
deleted file mode 100644
index d1a33290..00000000
--- a/src/do_gradientai/resources/providers/anthropic/keys.py
+++ /dev/null
@@ -1,686 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.providers.anthropic import key_list_params, key_create_params, key_update_params, key_list_agents_params
-from ....types.providers.anthropic.key_list_response import KeyListResponse
-from ....types.providers.anthropic.key_create_response import KeyCreateResponse
-from ....types.providers.anthropic.key_delete_response import KeyDeleteResponse
-from ....types.providers.anthropic.key_update_response import KeyUpdateResponse
-from ....types.providers.anthropic.key_retrieve_response import KeyRetrieveResponse
-from ....types.providers.anthropic.key_list_agents_response import KeyListAgentsResponse
-
-__all__ = ["KeysResource", "AsyncKeysResource"]
-
-
-class KeysResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> KeysResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return KeysResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> KeysResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return KeysResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- api_key: str | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyCreateResponse:
- """
- To create an Anthropic API key, send a POST request to
- `/v2/gen-ai/anthropic/keys`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v2/gen-ai/anthropic/keys"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
- body=maybe_transform(
- {
- "api_key": api_key,
- "name": name,
- },
- key_create_params.KeyCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=KeyCreateResponse,
- )
-
- def retrieve(
- self,
- api_key_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyRetrieveResponse:
- """
- To retrieve details of an Anthropic API key, send a GET request to
- `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
- return self._get(
- f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=KeyRetrieveResponse,
- )
-
- def update(
- self,
- path_api_key_uuid: str,
- *,
- api_key: str | NotGiven = NOT_GIVEN,
- body_api_key_uuid: str | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyUpdateResponse:
- """
- To update an Anthropic API key, send a PUT request to
- `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not path_api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
- return self._put(
- f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}",
- body=maybe_transform(
- {
- "api_key": api_key,
- "body_api_key_uuid": body_api_key_uuid,
- "name": name,
- },
- key_update_params.KeyUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=KeyUpdateResponse,
- )
-
- def list(
- self,
- *,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyListResponse:
- """
- To list all Anthropic API keys, send a GET request to
- `/v2/gen-ai/anthropic/keys`.
-
- Args:
- page: page number.
-
- per_page: items per page.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/v2/gen-ai/anthropic/keys"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- },
- key_list_params.KeyListParams,
- ),
- ),
- cast_to=KeyListResponse,
- )
-
- def delete(
- self,
- api_key_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyDeleteResponse:
- """
- To delete an Anthropic API key, send a DELETE request to
- `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
- return self._delete(
- f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=KeyDeleteResponse,
- )
-
- def list_agents(
- self,
- uuid: str,
- *,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyListAgentsResponse:
- """
- List Agents by Anthropic Key.
-
- Args:
- page: page number.
-
- per_page: items per page.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not uuid:
- raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
- return self._get(
- f"/v2/gen-ai/anthropic/keys/{uuid}/agents"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- },
- key_list_agents_params.KeyListAgentsParams,
- ),
- ),
- cast_to=KeyListAgentsResponse,
- )
-
-
-class AsyncKeysResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncKeysResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return AsyncKeysResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return AsyncKeysResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- api_key: str | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyCreateResponse:
- """
- To create an Anthropic API key, send a POST request to
- `/v2/gen-ai/anthropic/keys`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v2/gen-ai/anthropic/keys"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
- body=await async_maybe_transform(
- {
- "api_key": api_key,
- "name": name,
- },
- key_create_params.KeyCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=KeyCreateResponse,
- )
-
- async def retrieve(
- self,
- api_key_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyRetrieveResponse:
- """
- To retrieve details of an Anthropic API key, send a GET request to
- `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
- return await self._get(
- f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=KeyRetrieveResponse,
- )
-
- async def update(
- self,
- path_api_key_uuid: str,
- *,
- api_key: str | NotGiven = NOT_GIVEN,
- body_api_key_uuid: str | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyUpdateResponse:
- """
- To update an Anthropic API key, send a PUT request to
- `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not path_api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
- return await self._put(
- f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}",
- body=await async_maybe_transform(
- {
- "api_key": api_key,
- "body_api_key_uuid": body_api_key_uuid,
- "name": name,
- },
- key_update_params.KeyUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=KeyUpdateResponse,
- )
-
- async def list(
- self,
- *,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyListResponse:
- """
- To list all Anthropic API keys, send a GET request to
- `/v2/gen-ai/anthropic/keys`.
-
- Args:
- page: page number.
-
- per_page: items per page.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/v2/gen-ai/anthropic/keys"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- },
- key_list_params.KeyListParams,
- ),
- ),
- cast_to=KeyListResponse,
- )
-
- async def delete(
- self,
- api_key_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyDeleteResponse:
- """
- To delete an Anthropic API key, send a DELETE request to
- `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
- return await self._delete(
- f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=KeyDeleteResponse,
- )
-
- async def list_agents(
- self,
- uuid: str,
- *,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyListAgentsResponse:
- """
- List Agents by Anthropic Key.
-
- Args:
- page: page number.
-
- per_page: items per page.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not uuid:
- raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
- return await self._get(
- f"/v2/gen-ai/anthropic/keys/{uuid}/agents"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- },
- key_list_agents_params.KeyListAgentsParams,
- ),
- ),
- cast_to=KeyListAgentsResponse,
- )
-
-
-class KeysResourceWithRawResponse:
- def __init__(self, keys: KeysResource) -> None:
- self._keys = keys
-
- self.create = to_raw_response_wrapper(
- keys.create,
- )
- self.retrieve = to_raw_response_wrapper(
- keys.retrieve,
- )
- self.update = to_raw_response_wrapper(
- keys.update,
- )
- self.list = to_raw_response_wrapper(
- keys.list,
- )
- self.delete = to_raw_response_wrapper(
- keys.delete,
- )
- self.list_agents = to_raw_response_wrapper(
- keys.list_agents,
- )
-
-
-class AsyncKeysResourceWithRawResponse:
- def __init__(self, keys: AsyncKeysResource) -> None:
- self._keys = keys
-
- self.create = async_to_raw_response_wrapper(
- keys.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- keys.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- keys.update,
- )
- self.list = async_to_raw_response_wrapper(
- keys.list,
- )
- self.delete = async_to_raw_response_wrapper(
- keys.delete,
- )
- self.list_agents = async_to_raw_response_wrapper(
- keys.list_agents,
- )
-
-
-class KeysResourceWithStreamingResponse:
- def __init__(self, keys: KeysResource) -> None:
- self._keys = keys
-
- self.create = to_streamed_response_wrapper(
- keys.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- keys.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- keys.update,
- )
- self.list = to_streamed_response_wrapper(
- keys.list,
- )
- self.delete = to_streamed_response_wrapper(
- keys.delete,
- )
- self.list_agents = to_streamed_response_wrapper(
- keys.list_agents,
- )
-
-
-class AsyncKeysResourceWithStreamingResponse:
- def __init__(self, keys: AsyncKeysResource) -> None:
- self._keys = keys
-
- self.create = async_to_streamed_response_wrapper(
- keys.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- keys.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- keys.update,
- )
- self.list = async_to_streamed_response_wrapper(
- keys.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- keys.delete,
- )
- self.list_agents = async_to_streamed_response_wrapper(
- keys.list_agents,
- )
diff --git a/src/do_gradientai/resources/providers/openai/__init__.py b/src/do_gradientai/resources/providers/openai/__init__.py
deleted file mode 100644
index 66d8ca7a..00000000
--- a/src/do_gradientai/resources/providers/openai/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .keys import (
- KeysResource,
- AsyncKeysResource,
- KeysResourceWithRawResponse,
- AsyncKeysResourceWithRawResponse,
- KeysResourceWithStreamingResponse,
- AsyncKeysResourceWithStreamingResponse,
-)
-from .openai import (
- OpenAIResource,
- AsyncOpenAIResource,
- OpenAIResourceWithRawResponse,
- AsyncOpenAIResourceWithRawResponse,
- OpenAIResourceWithStreamingResponse,
- AsyncOpenAIResourceWithStreamingResponse,
-)
-
-__all__ = [
- "KeysResource",
- "AsyncKeysResource",
- "KeysResourceWithRawResponse",
- "AsyncKeysResourceWithRawResponse",
- "KeysResourceWithStreamingResponse",
- "AsyncKeysResourceWithStreamingResponse",
- "OpenAIResource",
- "AsyncOpenAIResource",
- "OpenAIResourceWithRawResponse",
- "AsyncOpenAIResourceWithRawResponse",
- "OpenAIResourceWithStreamingResponse",
- "AsyncOpenAIResourceWithStreamingResponse",
-]
diff --git a/src/do_gradientai/resources/providers/openai/keys.py b/src/do_gradientai/resources/providers/openai/keys.py
deleted file mode 100644
index 01cfee75..00000000
--- a/src/do_gradientai/resources/providers/openai/keys.py
+++ /dev/null
@@ -1,682 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import httpx
-
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import maybe_transform, async_maybe_transform
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-from ...._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from ...._base_client import make_request_options
-from ....types.providers.openai import key_list_params, key_create_params, key_update_params, key_retrieve_agents_params
-from ....types.providers.openai.key_list_response import KeyListResponse
-from ....types.providers.openai.key_create_response import KeyCreateResponse
-from ....types.providers.openai.key_delete_response import KeyDeleteResponse
-from ....types.providers.openai.key_update_response import KeyUpdateResponse
-from ....types.providers.openai.key_retrieve_response import KeyRetrieveResponse
-from ....types.providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse
-
-__all__ = ["KeysResource", "AsyncKeysResource"]
-
-
-class KeysResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> KeysResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return KeysResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> KeysResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return KeysResourceWithStreamingResponse(self)
-
- def create(
- self,
- *,
- api_key: str | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyCreateResponse:
- """
- To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/v2/gen-ai/openai/keys"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
- body=maybe_transform(
- {
- "api_key": api_key,
- "name": name,
- },
- key_create_params.KeyCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=KeyCreateResponse,
- )
-
- def retrieve(
- self,
- api_key_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyRetrieveResponse:
- """
- To retrieve details of an OpenAI API key, send a GET request to
- `/v2/gen-ai/openai/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
- return self._get(
- f"/v2/gen-ai/openai/keys/{api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=KeyRetrieveResponse,
- )
-
- def update(
- self,
- path_api_key_uuid: str,
- *,
- api_key: str | NotGiven = NOT_GIVEN,
- body_api_key_uuid: str | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyUpdateResponse:
- """
- To update an OpenAI API key, send a PUT request to
- `/v2/gen-ai/openai/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not path_api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
- return self._put(
- f"/v2/gen-ai/openai/keys/{path_api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}",
- body=maybe_transform(
- {
- "api_key": api_key,
- "body_api_key_uuid": body_api_key_uuid,
- "name": name,
- },
- key_update_params.KeyUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=KeyUpdateResponse,
- )
-
- def list(
- self,
- *,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyListResponse:
- """
- To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`.
-
- Args:
- page: page number.
-
- per_page: items per page.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._get(
- "/v2/gen-ai/openai/keys"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- },
- key_list_params.KeyListParams,
- ),
- ),
- cast_to=KeyListResponse,
- )
-
- def delete(
- self,
- api_key_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyDeleteResponse:
- """
- To delete an OpenAI API key, send a DELETE request to
- `/v2/gen-ai/openai/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
- return self._delete(
- f"/v2/gen-ai/openai/keys/{api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=KeyDeleteResponse,
- )
-
- def retrieve_agents(
- self,
- uuid: str,
- *,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyRetrieveAgentsResponse:
- """
- List Agents by OpenAI Key.
-
- Args:
- page: page number.
-
- per_page: items per page.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not uuid:
- raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
- return self._get(
- f"/v2/gen-ai/openai/keys/{uuid}/agents"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- },
- key_retrieve_agents_params.KeyRetrieveAgentsParams,
- ),
- ),
- cast_to=KeyRetrieveAgentsResponse,
- )
-
-
-class AsyncKeysResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncKeysResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return AsyncKeysResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return AsyncKeysResourceWithStreamingResponse(self)
-
- async def create(
- self,
- *,
- api_key: str | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyCreateResponse:
- """
- To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/v2/gen-ai/openai/keys"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
- body=await async_maybe_transform(
- {
- "api_key": api_key,
- "name": name,
- },
- key_create_params.KeyCreateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=KeyCreateResponse,
- )
-
- async def retrieve(
- self,
- api_key_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyRetrieveResponse:
- """
- To retrieve details of an OpenAI API key, send a GET request to
- `/v2/gen-ai/openai/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
- return await self._get(
- f"/v2/gen-ai/openai/keys/{api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=KeyRetrieveResponse,
- )
-
- async def update(
- self,
- path_api_key_uuid: str,
- *,
- api_key: str | NotGiven = NOT_GIVEN,
- body_api_key_uuid: str | NotGiven = NOT_GIVEN,
- name: str | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyUpdateResponse:
- """
- To update an OpenAI API key, send a PUT request to
- `/v2/gen-ai/openai/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not path_api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
- return await self._put(
- f"/v2/gen-ai/openai/keys/{path_api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}",
- body=await async_maybe_transform(
- {
- "api_key": api_key,
- "body_api_key_uuid": body_api_key_uuid,
- "name": name,
- },
- key_update_params.KeyUpdateParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=KeyUpdateResponse,
- )
-
- async def list(
- self,
- *,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyListResponse:
- """
- To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`.
-
- Args:
- page: page number.
-
- per_page: items per page.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._get(
- "/v2/gen-ai/openai/keys"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- },
- key_list_params.KeyListParams,
- ),
- ),
- cast_to=KeyListResponse,
- )
-
- async def delete(
- self,
- api_key_uuid: str,
- *,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyDeleteResponse:
- """
- To delete an OpenAI API key, send a DELETE request to
- `/v2/gen-ai/openai/keys/{api_key_uuid}`.
-
- Args:
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not api_key_uuid:
- raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
- return await self._delete(
- f"/v2/gen-ai/openai/keys/{api_key_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=KeyDeleteResponse,
- )
-
- async def retrieve_agents(
- self,
- uuid: str,
- *,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> KeyRetrieveAgentsResponse:
- """
- List Agents by OpenAI Key.
-
- Args:
- page: page number.
-
- per_page: items per page.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- if not uuid:
- raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
- return await self._get(
- f"/v2/gen-ai/openai/keys/{uuid}/agents"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents",
- options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- },
- key_retrieve_agents_params.KeyRetrieveAgentsParams,
- ),
- ),
- cast_to=KeyRetrieveAgentsResponse,
- )
-
-
-class KeysResourceWithRawResponse:
- def __init__(self, keys: KeysResource) -> None:
- self._keys = keys
-
- self.create = to_raw_response_wrapper(
- keys.create,
- )
- self.retrieve = to_raw_response_wrapper(
- keys.retrieve,
- )
- self.update = to_raw_response_wrapper(
- keys.update,
- )
- self.list = to_raw_response_wrapper(
- keys.list,
- )
- self.delete = to_raw_response_wrapper(
- keys.delete,
- )
- self.retrieve_agents = to_raw_response_wrapper(
- keys.retrieve_agents,
- )
-
-
-class AsyncKeysResourceWithRawResponse:
- def __init__(self, keys: AsyncKeysResource) -> None:
- self._keys = keys
-
- self.create = async_to_raw_response_wrapper(
- keys.create,
- )
- self.retrieve = async_to_raw_response_wrapper(
- keys.retrieve,
- )
- self.update = async_to_raw_response_wrapper(
- keys.update,
- )
- self.list = async_to_raw_response_wrapper(
- keys.list,
- )
- self.delete = async_to_raw_response_wrapper(
- keys.delete,
- )
- self.retrieve_agents = async_to_raw_response_wrapper(
- keys.retrieve_agents,
- )
-
-
-class KeysResourceWithStreamingResponse:
- def __init__(self, keys: KeysResource) -> None:
- self._keys = keys
-
- self.create = to_streamed_response_wrapper(
- keys.create,
- )
- self.retrieve = to_streamed_response_wrapper(
- keys.retrieve,
- )
- self.update = to_streamed_response_wrapper(
- keys.update,
- )
- self.list = to_streamed_response_wrapper(
- keys.list,
- )
- self.delete = to_streamed_response_wrapper(
- keys.delete,
- )
- self.retrieve_agents = to_streamed_response_wrapper(
- keys.retrieve_agents,
- )
-
-
-class AsyncKeysResourceWithStreamingResponse:
- def __init__(self, keys: AsyncKeysResource) -> None:
- self._keys = keys
-
- self.create = async_to_streamed_response_wrapper(
- keys.create,
- )
- self.retrieve = async_to_streamed_response_wrapper(
- keys.retrieve,
- )
- self.update = async_to_streamed_response_wrapper(
- keys.update,
- )
- self.list = async_to_streamed_response_wrapper(
- keys.list,
- )
- self.delete = async_to_streamed_response_wrapper(
- keys.delete,
- )
- self.retrieve_agents = async_to_streamed_response_wrapper(
- keys.retrieve_agents,
- )
diff --git a/src/do_gradientai/resources/providers/openai/openai.py b/src/do_gradientai/resources/providers/openai/openai.py
deleted file mode 100644
index b02dc2e1..00000000
--- a/src/do_gradientai/resources/providers/openai/openai.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .keys import (
- KeysResource,
- AsyncKeysResource,
- KeysResourceWithRawResponse,
- AsyncKeysResourceWithRawResponse,
- KeysResourceWithStreamingResponse,
- AsyncKeysResourceWithStreamingResponse,
-)
-from ...._compat import cached_property
-from ...._resource import SyncAPIResource, AsyncAPIResource
-
-__all__ = ["OpenAIResource", "AsyncOpenAIResource"]
-
-
-class OpenAIResource(SyncAPIResource):
- @cached_property
- def keys(self) -> KeysResource:
- return KeysResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> OpenAIResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return OpenAIResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return OpenAIResourceWithStreamingResponse(self)
-
-
-class AsyncOpenAIResource(AsyncAPIResource):
- @cached_property
- def keys(self) -> AsyncKeysResource:
- return AsyncKeysResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return AsyncOpenAIResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return AsyncOpenAIResourceWithStreamingResponse(self)
-
-
-class OpenAIResourceWithRawResponse:
- def __init__(self, openai: OpenAIResource) -> None:
- self._openai = openai
-
- @cached_property
- def keys(self) -> KeysResourceWithRawResponse:
- return KeysResourceWithRawResponse(self._openai.keys)
-
-
-class AsyncOpenAIResourceWithRawResponse:
- def __init__(self, openai: AsyncOpenAIResource) -> None:
- self._openai = openai
-
- @cached_property
- def keys(self) -> AsyncKeysResourceWithRawResponse:
- return AsyncKeysResourceWithRawResponse(self._openai.keys)
-
-
-class OpenAIResourceWithStreamingResponse:
- def __init__(self, openai: OpenAIResource) -> None:
- self._openai = openai
-
- @cached_property
- def keys(self) -> KeysResourceWithStreamingResponse:
- return KeysResourceWithStreamingResponse(self._openai.keys)
-
-
-class AsyncOpenAIResourceWithStreamingResponse:
- def __init__(self, openai: AsyncOpenAIResource) -> None:
- self._openai = openai
-
- @cached_property
- def keys(self) -> AsyncKeysResourceWithStreamingResponse:
- return AsyncKeysResourceWithStreamingResponse(self._openai.keys)
diff --git a/src/do_gradientai/resources/providers/providers.py b/src/do_gradientai/resources/providers/providers.py
deleted file mode 100644
index ef942f73..00000000
--- a/src/do_gradientai/resources/providers/providers.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from .openai.openai import (
- OpenAIResource,
- AsyncOpenAIResource,
- OpenAIResourceWithRawResponse,
- AsyncOpenAIResourceWithRawResponse,
- OpenAIResourceWithStreamingResponse,
- AsyncOpenAIResourceWithStreamingResponse,
-)
-from .anthropic.anthropic import (
- AnthropicResource,
- AsyncAnthropicResource,
- AnthropicResourceWithRawResponse,
- AsyncAnthropicResourceWithRawResponse,
- AnthropicResourceWithStreamingResponse,
- AsyncAnthropicResourceWithStreamingResponse,
-)
-
-__all__ = ["ProvidersResource", "AsyncProvidersResource"]
-
-
-class ProvidersResource(SyncAPIResource):
- @cached_property
- def anthropic(self) -> AnthropicResource:
- return AnthropicResource(self._client)
-
- @cached_property
- def openai(self) -> OpenAIResource:
- return OpenAIResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> ProvidersResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return ProvidersResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return ProvidersResourceWithStreamingResponse(self)
-
-
-class AsyncProvidersResource(AsyncAPIResource):
- @cached_property
- def anthropic(self) -> AsyncAnthropicResource:
- return AsyncAnthropicResource(self._client)
-
- @cached_property
- def openai(self) -> AsyncOpenAIResource:
- return AsyncOpenAIResource(self._client)
-
- @cached_property
- def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
- """
- return AsyncProvidersResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
- """
- return AsyncProvidersResourceWithStreamingResponse(self)
-
-
-class ProvidersResourceWithRawResponse:
- def __init__(self, providers: ProvidersResource) -> None:
- self._providers = providers
-
- @cached_property
- def anthropic(self) -> AnthropicResourceWithRawResponse:
- return AnthropicResourceWithRawResponse(self._providers.anthropic)
-
- @cached_property
- def openai(self) -> OpenAIResourceWithRawResponse:
- return OpenAIResourceWithRawResponse(self._providers.openai)
-
-
-class AsyncProvidersResourceWithRawResponse:
- def __init__(self, providers: AsyncProvidersResource) -> None:
- self._providers = providers
-
- @cached_property
- def anthropic(self) -> AsyncAnthropicResourceWithRawResponse:
- return AsyncAnthropicResourceWithRawResponse(self._providers.anthropic)
-
- @cached_property
- def openai(self) -> AsyncOpenAIResourceWithRawResponse:
- return AsyncOpenAIResourceWithRawResponse(self._providers.openai)
-
-
-class ProvidersResourceWithStreamingResponse:
- def __init__(self, providers: ProvidersResource) -> None:
- self._providers = providers
-
- @cached_property
- def anthropic(self) -> AnthropicResourceWithStreamingResponse:
- return AnthropicResourceWithStreamingResponse(self._providers.anthropic)
-
- @cached_property
- def openai(self) -> OpenAIResourceWithStreamingResponse:
- return OpenAIResourceWithStreamingResponse(self._providers.openai)
-
-
-class AsyncProvidersResourceWithStreamingResponse:
- def __init__(self, providers: AsyncProvidersResource) -> None:
- self._providers = providers
-
- @cached_property
- def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse:
- return AsyncAnthropicResourceWithStreamingResponse(self._providers.anthropic)
-
- @cached_property
- def openai(self) -> AsyncOpenAIResourceWithStreamingResponse:
- return AsyncOpenAIResourceWithStreamingResponse(self._providers.openai)
diff --git a/src/do_gradientai/types/providers/__init__.py b/src/do_gradientai/types/models/__init__.py
similarity index 100%
rename from src/do_gradientai/types/providers/__init__.py
rename to src/do_gradientai/types/models/__init__.py
diff --git a/src/do_gradientai/types/providers/anthropic/__init__.py b/src/do_gradientai/types/providers/anthropic/__init__.py
deleted file mode 100644
index eb47e709..00000000
--- a/src/do_gradientai/types/providers/anthropic/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .key_list_params import KeyListParams as KeyListParams
-from .key_create_params import KeyCreateParams as KeyCreateParams
-from .key_list_response import KeyListResponse as KeyListResponse
-from .key_update_params import KeyUpdateParams as KeyUpdateParams
-from .key_create_response import KeyCreateResponse as KeyCreateResponse
-from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse
-from .key_update_response import KeyUpdateResponse as KeyUpdateResponse
-from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse
-from .key_list_agents_params import KeyListAgentsParams as KeyListAgentsParams
-from .key_list_agents_response import KeyListAgentsResponse as KeyListAgentsResponse
diff --git a/src/do_gradientai/types/providers/anthropic/key_create_params.py b/src/do_gradientai/types/providers/anthropic/key_create_params.py
deleted file mode 100644
index 389f167c..00000000
--- a/src/do_gradientai/types/providers/anthropic/key_create_params.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["KeyCreateParams"]
-
-
-class KeyCreateParams(TypedDict, total=False):
- api_key: str
-
- name: str
diff --git a/src/do_gradientai/types/providers/anthropic/key_create_response.py b/src/do_gradientai/types/providers/anthropic/key_create_response.py
deleted file mode 100644
index a032810c..00000000
--- a/src/do_gradientai/types/providers/anthropic/key_create_response.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ...._models import BaseModel
-from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
-
-__all__ = ["KeyCreateResponse"]
-
-
-class KeyCreateResponse(BaseModel):
- api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
diff --git a/src/do_gradientai/types/providers/anthropic/key_delete_response.py b/src/do_gradientai/types/providers/anthropic/key_delete_response.py
deleted file mode 100644
index 2afe2dda..00000000
--- a/src/do_gradientai/types/providers/anthropic/key_delete_response.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ...._models import BaseModel
-from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
-
-__all__ = ["KeyDeleteResponse"]
-
-
-class KeyDeleteResponse(BaseModel):
- api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
diff --git a/src/do_gradientai/types/providers/anthropic/key_list_agents_params.py b/src/do_gradientai/types/providers/anthropic/key_list_agents_params.py
deleted file mode 100644
index ebbc3b7e..00000000
--- a/src/do_gradientai/types/providers/anthropic/key_list_agents_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["KeyListAgentsParams"]
-
-
-class KeyListAgentsParams(TypedDict, total=False):
- page: int
- """page number."""
-
- per_page: int
- """items per page."""
diff --git a/src/do_gradientai/types/providers/anthropic/key_list_agents_response.py b/src/do_gradientai/types/providers/anthropic/key_list_agents_response.py
deleted file mode 100644
index ba6ca946..00000000
--- a/src/do_gradientai/types/providers/anthropic/key_list_agents_response.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Optional
-
-from ...._models import BaseModel
-from ...agents.api_meta import APIMeta
-from ...agents.api_links import APILinks
-
-__all__ = ["KeyListAgentsResponse"]
-
-
-class KeyListAgentsResponse(BaseModel):
- agents: Optional[List["APIAgent"]] = None
-
- links: Optional[APILinks] = None
-
- meta: Optional[APIMeta] = None
-
-
-from ...api_agent import APIAgent
diff --git a/src/do_gradientai/types/providers/anthropic/key_list_params.py b/src/do_gradientai/types/providers/anthropic/key_list_params.py
deleted file mode 100644
index a11458ad..00000000
--- a/src/do_gradientai/types/providers/anthropic/key_list_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["KeyListParams"]
-
-
-class KeyListParams(TypedDict, total=False):
- page: int
- """page number."""
-
- per_page: int
- """items per page."""
diff --git a/src/do_gradientai/types/providers/anthropic/key_list_response.py b/src/do_gradientai/types/providers/anthropic/key_list_response.py
deleted file mode 100644
index d0b84e96..00000000
--- a/src/do_gradientai/types/providers/anthropic/key_list_response.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-
-from ...._models import BaseModel
-from ...agents.api_meta import APIMeta
-from ...agents.api_links import APILinks
-from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
-
-__all__ = ["KeyListResponse"]
-
-
-class KeyListResponse(BaseModel):
- api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None
-
- links: Optional[APILinks] = None
-
- meta: Optional[APIMeta] = None
diff --git a/src/do_gradientai/types/providers/anthropic/key_retrieve_response.py b/src/do_gradientai/types/providers/anthropic/key_retrieve_response.py
deleted file mode 100644
index b8361fc2..00000000
--- a/src/do_gradientai/types/providers/anthropic/key_retrieve_response.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ...._models import BaseModel
-from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
-
-__all__ = ["KeyRetrieveResponse"]
-
-
-class KeyRetrieveResponse(BaseModel):
- api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
diff --git a/src/do_gradientai/types/providers/anthropic/key_update_params.py b/src/do_gradientai/types/providers/anthropic/key_update_params.py
deleted file mode 100644
index c07d7f66..00000000
--- a/src/do_gradientai/types/providers/anthropic/key_update_params.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Annotated, TypedDict
-
-from ...._utils import PropertyInfo
-
-__all__ = ["KeyUpdateParams"]
-
-
-class KeyUpdateParams(TypedDict, total=False):
- api_key: str
-
- body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
-
- name: str
diff --git a/src/do_gradientai/types/providers/anthropic/key_update_response.py b/src/do_gradientai/types/providers/anthropic/key_update_response.py
deleted file mode 100644
index b04277a6..00000000
--- a/src/do_gradientai/types/providers/anthropic/key_update_response.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ...._models import BaseModel
-from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
-
-__all__ = ["KeyUpdateResponse"]
-
-
-class KeyUpdateResponse(BaseModel):
- api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
diff --git a/src/do_gradientai/types/providers/openai/__init__.py b/src/do_gradientai/types/providers/openai/__init__.py
deleted file mode 100644
index 70abf332..00000000
--- a/src/do_gradientai/types/providers/openai/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from .key_list_params import KeyListParams as KeyListParams
-from .key_create_params import KeyCreateParams as KeyCreateParams
-from .key_list_response import KeyListResponse as KeyListResponse
-from .key_update_params import KeyUpdateParams as KeyUpdateParams
-from .key_create_response import KeyCreateResponse as KeyCreateResponse
-from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse
-from .key_update_response import KeyUpdateResponse as KeyUpdateResponse
-from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse
-from .key_retrieve_agents_params import KeyRetrieveAgentsParams as KeyRetrieveAgentsParams
-from .key_retrieve_agents_response import KeyRetrieveAgentsResponse as KeyRetrieveAgentsResponse
diff --git a/src/do_gradientai/types/providers/openai/key_create_params.py b/src/do_gradientai/types/providers/openai/key_create_params.py
deleted file mode 100644
index 389f167c..00000000
--- a/src/do_gradientai/types/providers/openai/key_create_params.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["KeyCreateParams"]
-
-
-class KeyCreateParams(TypedDict, total=False):
- api_key: str
-
- name: str
diff --git a/src/do_gradientai/types/providers/openai/key_create_response.py b/src/do_gradientai/types/providers/openai/key_create_response.py
deleted file mode 100644
index f3b4d36c..00000000
--- a/src/do_gradientai/types/providers/openai/key_create_response.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ...._models import BaseModel
-from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
-
-__all__ = ["KeyCreateResponse"]
-
-
-class KeyCreateResponse(BaseModel):
- api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
diff --git a/src/do_gradientai/types/providers/openai/key_delete_response.py b/src/do_gradientai/types/providers/openai/key_delete_response.py
deleted file mode 100644
index 0c8922bb..00000000
--- a/src/do_gradientai/types/providers/openai/key_delete_response.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ...._models import BaseModel
-from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
-
-__all__ = ["KeyDeleteResponse"]
-
-
-class KeyDeleteResponse(BaseModel):
- api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
diff --git a/src/do_gradientai/types/providers/openai/key_list_params.py b/src/do_gradientai/types/providers/openai/key_list_params.py
deleted file mode 100644
index a11458ad..00000000
--- a/src/do_gradientai/types/providers/openai/key_list_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["KeyListParams"]
-
-
-class KeyListParams(TypedDict, total=False):
- page: int
- """page number."""
-
- per_page: int
- """items per page."""
diff --git a/src/do_gradientai/types/providers/openai/key_list_response.py b/src/do_gradientai/types/providers/openai/key_list_response.py
deleted file mode 100644
index c263cba3..00000000
--- a/src/do_gradientai/types/providers/openai/key_list_response.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import List, Optional
-
-from ...._models import BaseModel
-from ...agents.api_meta import APIMeta
-from ...agents.api_links import APILinks
-from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
-
-__all__ = ["KeyListResponse"]
-
-
-class KeyListResponse(BaseModel):
- api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None
-
- links: Optional[APILinks] = None
-
- meta: Optional[APIMeta] = None
diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py b/src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py
deleted file mode 100644
index ec745d14..00000000
--- a/src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import TypedDict
-
-__all__ = ["KeyRetrieveAgentsParams"]
-
-
-class KeyRetrieveAgentsParams(TypedDict, total=False):
- page: int
- """page number."""
-
- per_page: int
- """items per page."""
diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py b/src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py
deleted file mode 100644
index f42edea6..00000000
--- a/src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List, Optional
-
-from ...._models import BaseModel
-from ...agents.api_meta import APIMeta
-from ...agents.api_links import APILinks
-
-__all__ = ["KeyRetrieveAgentsResponse"]
-
-
-class KeyRetrieveAgentsResponse(BaseModel):
- agents: Optional[List["APIAgent"]] = None
-
- links: Optional[APILinks] = None
-
- meta: Optional[APIMeta] = None
-
-
-from ...api_agent import APIAgent
diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_response.py b/src/do_gradientai/types/providers/openai/key_retrieve_response.py
deleted file mode 100644
index 7015b6f7..00000000
--- a/src/do_gradientai/types/providers/openai/key_retrieve_response.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ...._models import BaseModel
-from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
-
-__all__ = ["KeyRetrieveResponse"]
-
-
-class KeyRetrieveResponse(BaseModel):
- api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
diff --git a/src/do_gradientai/types/providers/openai/key_update_params.py b/src/do_gradientai/types/providers/openai/key_update_params.py
deleted file mode 100644
index c07d7f66..00000000
--- a/src/do_gradientai/types/providers/openai/key_update_params.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing_extensions import Annotated, TypedDict
-
-from ...._utils import PropertyInfo
-
-__all__ = ["KeyUpdateParams"]
-
-
-class KeyUpdateParams(TypedDict, total=False):
- api_key: str
-
- body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
-
- name: str
diff --git a/src/do_gradientai/types/providers/openai/key_update_response.py b/src/do_gradientai/types/providers/openai/key_update_response.py
deleted file mode 100644
index 4889f994..00000000
--- a/src/do_gradientai/types/providers/openai/key_update_response.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from ...._models import BaseModel
-from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
-
-__all__ = ["KeyUpdateResponse"]
-
-
-class KeyUpdateResponse(BaseModel):
- api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
diff --git a/tests/api_resources/providers/__init__.py b/tests/api_resources/models/__init__.py
similarity index 100%
rename from tests/api_resources/providers/__init__.py
rename to tests/api_resources/models/__init__.py
diff --git a/tests/api_resources/providers/anthropic/__init__.py b/tests/api_resources/providers/anthropic/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/providers/anthropic/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/providers/anthropic/test_keys.py b/tests/api_resources/providers/anthropic/test_keys.py
deleted file mode 100644
index 7aa595f7..00000000
--- a/tests/api_resources/providers/anthropic/test_keys.py
+++ /dev/null
@@ -1,557 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.providers.anthropic import (
- KeyListResponse,
- KeyCreateResponse,
- KeyDeleteResponse,
- KeyUpdateResponse,
- KeyRetrieveResponse,
- KeyListAgentsResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestKeys:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.create()
- assert_matches_type(KeyCreateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.create(
- api_key="api_key",
- name="name",
- )
- assert_matches_type(KeyCreateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: GradientAI) -> None:
- response = client.providers.anthropic.keys.with_raw_response.create()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = response.parse()
- assert_matches_type(KeyCreateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: GradientAI) -> None:
- with client.providers.anthropic.keys.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = response.parse()
- assert_matches_type(KeyCreateResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.retrieve(
- "api_key_uuid",
- )
- assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: GradientAI) -> None:
- response = client.providers.anthropic.keys.with_raw_response.retrieve(
- "api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = response.parse()
- assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: GradientAI) -> None:
- with client.providers.anthropic.keys.with_streaming_response.retrieve(
- "api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = response.parse()
- assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- client.providers.anthropic.keys.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.update(
- path_api_key_uuid="api_key_uuid",
- )
- assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update_with_all_params(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.update(
- path_api_key_uuid="api_key_uuid",
- api_key="api_key",
- body_api_key_uuid="api_key_uuid",
- name="name",
- )
- assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: GradientAI) -> None:
- response = client.providers.anthropic.keys.with_raw_response.update(
- path_api_key_uuid="api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = response.parse()
- assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: GradientAI) -> None:
- with client.providers.anthropic.keys.with_streaming_response.update(
- path_api_key_uuid="api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = response.parse()
- assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
- client.providers.anthropic.keys.with_raw_response.update(
- path_api_key_uuid="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.list()
- assert_matches_type(KeyListResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.list(
- page=0,
- per_page=0,
- )
- assert_matches_type(KeyListResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: GradientAI) -> None:
- response = client.providers.anthropic.keys.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = response.parse()
- assert_matches_type(KeyListResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: GradientAI) -> None:
- with client.providers.anthropic.keys.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = response.parse()
- assert_matches_type(KeyListResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.delete(
- "api_key_uuid",
- )
- assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: GradientAI) -> None:
- response = client.providers.anthropic.keys.with_raw_response.delete(
- "api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = response.parse()
- assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: GradientAI) -> None:
- with client.providers.anthropic.keys.with_streaming_response.delete(
- "api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = response.parse()
- assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- client.providers.anthropic.keys.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_agents(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.list_agents(
- uuid="uuid",
- )
- assert_matches_type(KeyListAgentsResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_agents_with_all_params(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.list_agents(
- uuid="uuid",
- page=0,
- per_page=0,
- )
- assert_matches_type(KeyListAgentsResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list_agents(self, client: GradientAI) -> None:
- response = client.providers.anthropic.keys.with_raw_response.list_agents(
- uuid="uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = response.parse()
- assert_matches_type(KeyListAgentsResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list_agents(self, client: GradientAI) -> None:
- with client.providers.anthropic.keys.with_streaming_response.list_agents(
- uuid="uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = response.parse()
- assert_matches_type(KeyListAgentsResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_list_agents(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
- client.providers.anthropic.keys.with_raw_response.list_agents(
- uuid="",
- )
-
-
-class TestAsyncKeys:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.create()
- assert_matches_type(KeyCreateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.create(
- api_key="api_key",
- name="name",
- )
- assert_matches_type(KeyCreateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.anthropic.keys.with_raw_response.create()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = await response.parse()
- assert_matches_type(KeyCreateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.anthropic.keys.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = await response.parse()
- assert_matches_type(KeyCreateResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.retrieve(
- "api_key_uuid",
- )
- assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.anthropic.keys.with_raw_response.retrieve(
- "api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = await response.parse()
- assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.anthropic.keys.with_streaming_response.retrieve(
- "api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = await response.parse()
- assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- await async_client.providers.anthropic.keys.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.update(
- path_api_key_uuid="api_key_uuid",
- )
- assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.update(
- path_api_key_uuid="api_key_uuid",
- api_key="api_key",
- body_api_key_uuid="api_key_uuid",
- name="name",
- )
- assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.anthropic.keys.with_raw_response.update(
- path_api_key_uuid="api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = await response.parse()
- assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.anthropic.keys.with_streaming_response.update(
- path_api_key_uuid="api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = await response.parse()
- assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
- await async_client.providers.anthropic.keys.with_raw_response.update(
- path_api_key_uuid="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.list()
- assert_matches_type(KeyListResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.list(
- page=0,
- per_page=0,
- )
- assert_matches_type(KeyListResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.anthropic.keys.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = await response.parse()
- assert_matches_type(KeyListResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.anthropic.keys.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = await response.parse()
- assert_matches_type(KeyListResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.delete(
- "api_key_uuid",
- )
- assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.anthropic.keys.with_raw_response.delete(
- "api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = await response.parse()
- assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.anthropic.keys.with_streaming_response.delete(
- "api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = await response.parse()
- assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- await async_client.providers.anthropic.keys.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.list_agents(
- uuid="uuid",
- )
- assert_matches_type(KeyListAgentsResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.list_agents(
- uuid="uuid",
- page=0,
- per_page=0,
- )
- assert_matches_type(KeyListAgentsResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.anthropic.keys.with_raw_response.list_agents(
- uuid="uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = await response.parse()
- assert_matches_type(KeyListAgentsResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.anthropic.keys.with_streaming_response.list_agents(
- uuid="uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = await response.parse()
- assert_matches_type(KeyListAgentsResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
- await async_client.providers.anthropic.keys.with_raw_response.list_agents(
- uuid="",
- )
diff --git a/tests/api_resources/providers/openai/__init__.py b/tests/api_resources/providers/openai/__init__.py
deleted file mode 100644
index fd8019a9..00000000
--- a/tests/api_resources/providers/openai/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/providers/openai/test_keys.py b/tests/api_resources/providers/openai/test_keys.py
deleted file mode 100644
index 714dc4bd..00000000
--- a/tests/api_resources/providers/openai/test_keys.py
+++ /dev/null
@@ -1,557 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from tests.utils import assert_matches_type
-from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.providers.openai import (
- KeyListResponse,
- KeyCreateResponse,
- KeyDeleteResponse,
- KeyUpdateResponse,
- KeyRetrieveResponse,
- KeyRetrieveAgentsResponse,
-)
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestKeys:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.create()
- assert_matches_type(KeyCreateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_create_with_all_params(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.create(
- api_key="api_key",
- name="name",
- )
- assert_matches_type(KeyCreateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_create(self, client: GradientAI) -> None:
- response = client.providers.openai.keys.with_raw_response.create()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = response.parse()
- assert_matches_type(KeyCreateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_create(self, client: GradientAI) -> None:
- with client.providers.openai.keys.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = response.parse()
- assert_matches_type(KeyCreateResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.retrieve(
- "api_key_uuid",
- )
- assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve(self, client: GradientAI) -> None:
- response = client.providers.openai.keys.with_raw_response.retrieve(
- "api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = response.parse()
- assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve(self, client: GradientAI) -> None:
- with client.providers.openai.keys.with_streaming_response.retrieve(
- "api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = response.parse()
- assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- client.providers.openai.keys.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.update(
- path_api_key_uuid="api_key_uuid",
- )
- assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_update_with_all_params(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.update(
- path_api_key_uuid="api_key_uuid",
- api_key="api_key",
- body_api_key_uuid="api_key_uuid",
- name="name",
- )
- assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_update(self, client: GradientAI) -> None:
- response = client.providers.openai.keys.with_raw_response.update(
- path_api_key_uuid="api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = response.parse()
- assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_update(self, client: GradientAI) -> None:
- with client.providers.openai.keys.with_streaming_response.update(
- path_api_key_uuid="api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = response.parse()
- assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_update(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
- client.providers.openai.keys.with_raw_response.update(
- path_api_key_uuid="",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.list()
- assert_matches_type(KeyListResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_list_with_all_params(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.list(
- page=0,
- per_page=0,
- )
- assert_matches_type(KeyListResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_list(self, client: GradientAI) -> None:
- response = client.providers.openai.keys.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = response.parse()
- assert_matches_type(KeyListResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_list(self, client: GradientAI) -> None:
- with client.providers.openai.keys.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = response.parse()
- assert_matches_type(KeyListResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_method_delete(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.delete(
- "api_key_uuid",
- )
- assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_delete(self, client: GradientAI) -> None:
- response = client.providers.openai.keys.with_raw_response.delete(
- "api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = response.parse()
- assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_delete(self, client: GradientAI) -> None:
- with client.providers.openai.keys.with_streaming_response.delete(
- "api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = response.parse()
- assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_delete(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- client.providers.openai.keys.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_agents(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.retrieve_agents(
- uuid="uuid",
- )
- assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.retrieve_agents(
- uuid="uuid",
- page=0,
- per_page=0,
- )
- assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_raw_response_retrieve_agents(self, client: GradientAI) -> None:
- response = client.providers.openai.keys.with_raw_response.retrieve_agents(
- uuid="uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = response.parse()
- assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None:
- with client.providers.openai.keys.with_streaming_response.retrieve_agents(
- uuid="uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = response.parse()
- assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- def test_path_params_retrieve_agents(self, client: GradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
- client.providers.openai.keys.with_raw_response.retrieve_agents(
- uuid="",
- )
-
-
-class TestAsyncKeys:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.create()
- assert_matches_type(KeyCreateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.create(
- api_key="api_key",
- name="name",
- )
- assert_matches_type(KeyCreateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.openai.keys.with_raw_response.create()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = await response.parse()
- assert_matches_type(KeyCreateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.openai.keys.with_streaming_response.create() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = await response.parse()
- assert_matches_type(KeyCreateResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.retrieve(
- "api_key_uuid",
- )
- assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.openai.keys.with_raw_response.retrieve(
- "api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = await response.parse()
- assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.openai.keys.with_streaming_response.retrieve(
- "api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = await response.parse()
- assert_matches_type(KeyRetrieveResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- await async_client.providers.openai.keys.with_raw_response.retrieve(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.update(
- path_api_key_uuid="api_key_uuid",
- )
- assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.update(
- path_api_key_uuid="api_key_uuid",
- api_key="api_key",
- body_api_key_uuid="api_key_uuid",
- name="name",
- )
- assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.openai.keys.with_raw_response.update(
- path_api_key_uuid="api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = await response.parse()
- assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.openai.keys.with_streaming_response.update(
- path_api_key_uuid="api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = await response.parse()
- assert_matches_type(KeyUpdateResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
- await async_client.providers.openai.keys.with_raw_response.update(
- path_api_key_uuid="",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.list()
- assert_matches_type(KeyListResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.list(
- page=0,
- per_page=0,
- )
- assert_matches_type(KeyListResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.openai.keys.with_raw_response.list()
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = await response.parse()
- assert_matches_type(KeyListResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.openai.keys.with_streaming_response.list() as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = await response.parse()
- assert_matches_type(KeyListResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.delete(
- "api_key_uuid",
- )
- assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.openai.keys.with_raw_response.delete(
- "api_key_uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = await response.parse()
- assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.openai.keys.with_streaming_response.delete(
- "api_key_uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = await response.parse()
- assert_matches_type(KeyDeleteResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- await async_client.providers.openai.keys.with_raw_response.delete(
- "",
- )
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.retrieve_agents(
- uuid="uuid",
- )
- assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.retrieve_agents(
- uuid="uuid",
- page=0,
- per_page=0,
- )
- assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.openai.keys.with_raw_response.retrieve_agents(
- uuid="uuid",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- key = await response.parse()
- assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
-
- @pytest.mark.skip()
- @parametrize
- async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.openai.keys.with_streaming_response.retrieve_agents(
- uuid="uuid",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- key = await response.parse()
- assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip()
- @parametrize
- async def test_path_params_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
- await async_client.providers.openai.keys.with_raw_response.retrieve_agents(
- uuid="",
- )
From e5ce59057792968892317215078ac2c11e811812 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 03:22:54 +0000
Subject: [PATCH 08/21] feat(api): update via SDK Studio
---
.stats.yml | 4 +-
api.md | 56 +-
src/do_gradientai/_client.py | 41 +-
src/do_gradientai/resources/__init__.py | 14 +
.../resources/{models => }/models.py | 16 +-
.../resources/models/__init__.py | 19 -
.../resources/providers/__init__.py | 47 ++
.../resources/providers/anthropic/__init__.py | 33 +
.../providers/anthropic/anthropic.py | 102 +++
.../resources/providers/anthropic/keys.py | 686 ++++++++++++++++++
.../resources/providers/openai/__init__.py | 33 +
.../resources/providers/openai/keys.py | 682 +++++++++++++++++
.../resources/providers/openai/openai.py | 102 +++
.../resources/providers/providers.py | 134 ++++
.../types/{models => providers}/__init__.py | 0
.../types/providers/anthropic/__init__.py | 14 +
.../providers/anthropic/key_create_params.py | 13 +
.../anthropic/key_create_response.py | 12 +
.../anthropic/key_delete_response.py | 12 +
.../anthropic/key_list_agents_params.py | 15 +
.../anthropic/key_list_agents_response.py | 22 +
.../providers/anthropic/key_list_params.py | 15 +
.../providers/anthropic/key_list_response.py | 18 +
.../anthropic/key_retrieve_response.py | 12 +
.../providers/anthropic/key_update_params.py | 17 +
.../anthropic/key_update_response.py | 12 +
.../types/providers/openai/__init__.py | 14 +
.../providers/openai/key_create_params.py | 13 +
.../providers/openai/key_create_response.py | 12 +
.../providers/openai/key_delete_response.py | 12 +
.../types/providers/openai/key_list_params.py | 15 +
.../providers/openai/key_list_response.py | 18 +
.../openai/key_retrieve_agents_params.py | 15 +
.../openai/key_retrieve_agents_response.py | 22 +
.../providers/openai/key_retrieve_response.py | 12 +
.../providers/openai/key_update_params.py | 17 +
.../providers/openai/key_update_response.py | 12 +
.../{models => providers}/__init__.py | 0
.../providers/anthropic/__init__.py | 1 +
.../providers/anthropic/test_keys.py | 557 ++++++++++++++
.../providers/openai/__init__.py | 1 +
.../providers/openai/test_keys.py | 557 ++++++++++++++
42 files changed, 3377 insertions(+), 32 deletions(-)
rename src/do_gradientai/resources/{models => }/models.py (95%)
delete mode 100644 src/do_gradientai/resources/models/__init__.py
create mode 100644 src/do_gradientai/resources/providers/__init__.py
create mode 100644 src/do_gradientai/resources/providers/anthropic/__init__.py
create mode 100644 src/do_gradientai/resources/providers/anthropic/anthropic.py
create mode 100644 src/do_gradientai/resources/providers/anthropic/keys.py
create mode 100644 src/do_gradientai/resources/providers/openai/__init__.py
create mode 100644 src/do_gradientai/resources/providers/openai/keys.py
create mode 100644 src/do_gradientai/resources/providers/openai/openai.py
create mode 100644 src/do_gradientai/resources/providers/providers.py
rename src/do_gradientai/types/{models => providers}/__init__.py (100%)
create mode 100644 src/do_gradientai/types/providers/anthropic/__init__.py
create mode 100644 src/do_gradientai/types/providers/anthropic/key_create_params.py
create mode 100644 src/do_gradientai/types/providers/anthropic/key_create_response.py
create mode 100644 src/do_gradientai/types/providers/anthropic/key_delete_response.py
create mode 100644 src/do_gradientai/types/providers/anthropic/key_list_agents_params.py
create mode 100644 src/do_gradientai/types/providers/anthropic/key_list_agents_response.py
create mode 100644 src/do_gradientai/types/providers/anthropic/key_list_params.py
create mode 100644 src/do_gradientai/types/providers/anthropic/key_list_response.py
create mode 100644 src/do_gradientai/types/providers/anthropic/key_retrieve_response.py
create mode 100644 src/do_gradientai/types/providers/anthropic/key_update_params.py
create mode 100644 src/do_gradientai/types/providers/anthropic/key_update_response.py
create mode 100644 src/do_gradientai/types/providers/openai/__init__.py
create mode 100644 src/do_gradientai/types/providers/openai/key_create_params.py
create mode 100644 src/do_gradientai/types/providers/openai/key_create_response.py
create mode 100644 src/do_gradientai/types/providers/openai/key_delete_response.py
create mode 100644 src/do_gradientai/types/providers/openai/key_list_params.py
create mode 100644 src/do_gradientai/types/providers/openai/key_list_response.py
create mode 100644 src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py
create mode 100644 src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py
create mode 100644 src/do_gradientai/types/providers/openai/key_retrieve_response.py
create mode 100644 src/do_gradientai/types/providers/openai/key_update_params.py
create mode 100644 src/do_gradientai/types/providers/openai/key_update_response.py
rename tests/api_resources/{models => providers}/__init__.py (100%)
create mode 100644 tests/api_resources/providers/anthropic/__init__.py
create mode 100644 tests/api_resources/providers/anthropic/test_keys.py
create mode 100644 tests/api_resources/providers/openai/__init__.py
create mode 100644 tests/api_resources/providers/openai/test_keys.py
diff --git a/.stats.yml b/.stats.yml
index 645d4148..e9d82b51 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 55
+configured_endpoints: 67
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: ed552d382f42c2e579a4bb0a608e2055
+config_hash: e178baf496088c521dd245cbc46c932a
diff --git a/api.md b/api.md
index 9811559a..a7297098 100644
--- a/api.md
+++ b/api.md
@@ -191,6 +191,60 @@ Methods:
- client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse
- client.agents.child_agents.view(uuid) -> ChildAgentViewResponse
+# Providers
+
+## Anthropic
+
+### Keys
+
+Types:
+
+```python
+from do_gradientai.types.providers.anthropic import (
+ KeyCreateResponse,
+ KeyRetrieveResponse,
+ KeyUpdateResponse,
+ KeyListResponse,
+ KeyDeleteResponse,
+ KeyListAgentsResponse,
+)
+```
+
+Methods:
+
+- client.providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse
+- client.providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
+- client.providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
+- client.providers.anthropic.keys.list(\*\*params) -> KeyListResponse
+- client.providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse
+- client.providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
+
+## OpenAI
+
+### Keys
+
+Types:
+
+```python
+from do_gradientai.types.providers.openai import (
+ KeyCreateResponse,
+ KeyRetrieveResponse,
+ KeyUpdateResponse,
+ KeyListResponse,
+ KeyDeleteResponse,
+ KeyRetrieveAgentsResponse,
+)
+```
+
+Methods:
+
+- client.providers.openai.keys.create(\*\*params) -> KeyCreateResponse
+- client.providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
+- client.providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
+- client.providers.openai.keys.list(\*\*params) -> KeyListResponse
+- client.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse
+- client.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse
+
# Regions
Types:
@@ -321,4 +375,4 @@ from do_gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelLi
Methods:
-- client.models.list(\*\*params) -> ModelListResponse
+- client.models.list(\*\*params) -> ModelListResponse
diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py
index afd18a26..8710fe68 100644
--- a/src/do_gradientai/_client.py
+++ b/src/do_gradientai/_client.py
@@ -31,13 +31,14 @@
)
if TYPE_CHECKING:
- from .resources import chat, agents, models, regions, inference, indexing_jobs, knowledge_bases
+ from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases
+ from .resources.models import ModelsResource, AsyncModelsResource
from .resources.regions import RegionsResource, AsyncRegionsResource
from .resources.chat.chat import ChatResource, AsyncChatResource
from .resources.agents.agents import AgentsResource, AsyncAgentsResource
from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource
- from .resources.models.models import ModelsResource, AsyncModelsResource
from .resources.inference.inference import InferenceResource, AsyncInferenceResource
+ from .resources.providers.providers import ProvidersResource, AsyncProvidersResource
from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource
__all__ = [
@@ -114,6 +115,12 @@ def agents(self) -> AgentsResource:
return AgentsResource(self)
+ @cached_property
+ def providers(self) -> ProvidersResource:
+ from .resources.providers import ProvidersResource
+
+ return ProvidersResource(self)
+
@cached_property
def regions(self) -> RegionsResource:
from .resources.regions import RegionsResource
@@ -327,6 +334,12 @@ def agents(self) -> AsyncAgentsResource:
return AsyncAgentsResource(self)
+ @cached_property
+ def providers(self) -> AsyncProvidersResource:
+ from .resources.providers import AsyncProvidersResource
+
+ return AsyncProvidersResource(self)
+
@cached_property
def regions(self) -> AsyncRegionsResource:
from .resources.regions import AsyncRegionsResource
@@ -490,6 +503,12 @@ def agents(self) -> agents.AgentsResourceWithRawResponse:
return AgentsResourceWithRawResponse(self._client.agents)
+ @cached_property
+ def providers(self) -> providers.ProvidersResourceWithRawResponse:
+ from .resources.providers import ProvidersResourceWithRawResponse
+
+ return ProvidersResourceWithRawResponse(self._client.providers)
+
@cached_property
def regions(self) -> regions.RegionsResourceWithRawResponse:
from .resources.regions import RegionsResourceWithRawResponse
@@ -539,6 +558,12 @@ def agents(self) -> agents.AsyncAgentsResourceWithRawResponse:
return AsyncAgentsResourceWithRawResponse(self._client.agents)
+ @cached_property
+ def providers(self) -> providers.AsyncProvidersResourceWithRawResponse:
+ from .resources.providers import AsyncProvidersResourceWithRawResponse
+
+ return AsyncProvidersResourceWithRawResponse(self._client.providers)
+
@cached_property
def regions(self) -> regions.AsyncRegionsResourceWithRawResponse:
from .resources.regions import AsyncRegionsResourceWithRawResponse
@@ -588,6 +613,12 @@ def agents(self) -> agents.AgentsResourceWithStreamingResponse:
return AgentsResourceWithStreamingResponse(self._client.agents)
+ @cached_property
+ def providers(self) -> providers.ProvidersResourceWithStreamingResponse:
+ from .resources.providers import ProvidersResourceWithStreamingResponse
+
+ return ProvidersResourceWithStreamingResponse(self._client.providers)
+
@cached_property
def regions(self) -> regions.RegionsResourceWithStreamingResponse:
from .resources.regions import RegionsResourceWithStreamingResponse
@@ -637,6 +668,12 @@ def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse:
return AsyncAgentsResourceWithStreamingResponse(self._client.agents)
+ @cached_property
+ def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse:
+ from .resources.providers import AsyncProvidersResourceWithStreamingResponse
+
+ return AsyncProvidersResourceWithStreamingResponse(self._client.providers)
+
@cached_property
def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse:
from .resources.regions import AsyncRegionsResourceWithStreamingResponse
diff --git a/src/do_gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py
index b074f7d1..1763a13e 100644
--- a/src/do_gradientai/resources/__init__.py
+++ b/src/do_gradientai/resources/__init__.py
@@ -40,6 +40,14 @@
InferenceResourceWithStreamingResponse,
AsyncInferenceResourceWithStreamingResponse,
)
+from .providers import (
+ ProvidersResource,
+ AsyncProvidersResource,
+ ProvidersResourceWithRawResponse,
+ AsyncProvidersResourceWithRawResponse,
+ ProvidersResourceWithStreamingResponse,
+ AsyncProvidersResourceWithStreamingResponse,
+)
from .indexing_jobs import (
IndexingJobsResource,
AsyncIndexingJobsResource,
@@ -64,6 +72,12 @@
"AsyncAgentsResourceWithRawResponse",
"AgentsResourceWithStreamingResponse",
"AsyncAgentsResourceWithStreamingResponse",
+ "ProvidersResource",
+ "AsyncProvidersResource",
+ "ProvidersResourceWithRawResponse",
+ "AsyncProvidersResourceWithRawResponse",
+ "ProvidersResourceWithStreamingResponse",
+ "AsyncProvidersResourceWithStreamingResponse",
"RegionsResource",
"AsyncRegionsResource",
"RegionsResourceWithRawResponse",
diff --git a/src/do_gradientai/resources/models/models.py b/src/do_gradientai/resources/models.py
similarity index 95%
rename from src/do_gradientai/resources/models/models.py
rename to src/do_gradientai/resources/models.py
index acdd45a1..c8e78b9b 100644
--- a/src/do_gradientai/resources/models/models.py
+++ b/src/do_gradientai/resources/models.py
@@ -7,19 +7,19 @@
import httpx
-from ...types import model_list_params
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import maybe_transform, async_maybe_transform
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
+from ..types import model_list_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._utils import maybe_transform, async_maybe_transform
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
to_raw_response_wrapper,
to_streamed_response_wrapper,
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
-from ..._base_client import make_request_options
-from ...types.model_list_response import ModelListResponse
+from .._base_client import make_request_options
+from ..types.model_list_response import ModelListResponse
__all__ = ["ModelsResource", "AsyncModelsResource"]
diff --git a/src/do_gradientai/resources/models/__init__.py b/src/do_gradientai/resources/models/__init__.py
deleted file mode 100644
index 7a5c25cc..00000000
--- a/src/do_gradientai/resources/models/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from .models import (
- ModelsResource,
- AsyncModelsResource,
- ModelsResourceWithRawResponse,
- AsyncModelsResourceWithRawResponse,
- ModelsResourceWithStreamingResponse,
- AsyncModelsResourceWithStreamingResponse,
-)
-
-__all__ = [
- "ModelsResource",
- "AsyncModelsResource",
- "ModelsResourceWithRawResponse",
- "AsyncModelsResourceWithRawResponse",
- "ModelsResourceWithStreamingResponse",
- "AsyncModelsResourceWithStreamingResponse",
-]
diff --git a/src/do_gradientai/resources/providers/__init__.py b/src/do_gradientai/resources/providers/__init__.py
new file mode 100644
index 00000000..1731e057
--- /dev/null
+++ b/src/do_gradientai/resources/providers/__init__.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .openai import (
+ OpenAIResource,
+ AsyncOpenAIResource,
+ OpenAIResourceWithRawResponse,
+ AsyncOpenAIResourceWithRawResponse,
+ OpenAIResourceWithStreamingResponse,
+ AsyncOpenAIResourceWithStreamingResponse,
+)
+from .anthropic import (
+ AnthropicResource,
+ AsyncAnthropicResource,
+ AnthropicResourceWithRawResponse,
+ AsyncAnthropicResourceWithRawResponse,
+ AnthropicResourceWithStreamingResponse,
+ AsyncAnthropicResourceWithStreamingResponse,
+)
+from .providers import (
+ ProvidersResource,
+ AsyncProvidersResource,
+ ProvidersResourceWithRawResponse,
+ AsyncProvidersResourceWithRawResponse,
+ ProvidersResourceWithStreamingResponse,
+ AsyncProvidersResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "AnthropicResource",
+ "AsyncAnthropicResource",
+ "AnthropicResourceWithRawResponse",
+ "AsyncAnthropicResourceWithRawResponse",
+ "AnthropicResourceWithStreamingResponse",
+ "AsyncAnthropicResourceWithStreamingResponse",
+ "OpenAIResource",
+ "AsyncOpenAIResource",
+ "OpenAIResourceWithRawResponse",
+ "AsyncOpenAIResourceWithRawResponse",
+ "OpenAIResourceWithStreamingResponse",
+ "AsyncOpenAIResourceWithStreamingResponse",
+ "ProvidersResource",
+ "AsyncProvidersResource",
+ "ProvidersResourceWithRawResponse",
+ "AsyncProvidersResourceWithRawResponse",
+ "ProvidersResourceWithStreamingResponse",
+ "AsyncProvidersResourceWithStreamingResponse",
+]
diff --git a/src/do_gradientai/resources/providers/anthropic/__init__.py b/src/do_gradientai/resources/providers/anthropic/__init__.py
new file mode 100644
index 00000000..057a3a2f
--- /dev/null
+++ b/src/do_gradientai/resources/providers/anthropic/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .keys import (
+ KeysResource,
+ AsyncKeysResource,
+ KeysResourceWithRawResponse,
+ AsyncKeysResourceWithRawResponse,
+ KeysResourceWithStreamingResponse,
+ AsyncKeysResourceWithStreamingResponse,
+)
+from .anthropic import (
+ AnthropicResource,
+ AsyncAnthropicResource,
+ AnthropicResourceWithRawResponse,
+ AsyncAnthropicResourceWithRawResponse,
+ AnthropicResourceWithStreamingResponse,
+ AsyncAnthropicResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "KeysResource",
+ "AsyncKeysResource",
+ "KeysResourceWithRawResponse",
+ "AsyncKeysResourceWithRawResponse",
+ "KeysResourceWithStreamingResponse",
+ "AsyncKeysResourceWithStreamingResponse",
+ "AnthropicResource",
+ "AsyncAnthropicResource",
+ "AnthropicResourceWithRawResponse",
+ "AsyncAnthropicResourceWithRawResponse",
+ "AnthropicResourceWithStreamingResponse",
+ "AsyncAnthropicResourceWithStreamingResponse",
+]
diff --git a/src/do_gradientai/resources/providers/anthropic/anthropic.py b/src/do_gradientai/resources/providers/anthropic/anthropic.py
new file mode 100644
index 00000000..23a914e9
--- /dev/null
+++ b/src/do_gradientai/resources/providers/anthropic/anthropic.py
@@ -0,0 +1,102 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .keys import (
+ KeysResource,
+ AsyncKeysResource,
+ KeysResourceWithRawResponse,
+ AsyncKeysResourceWithRawResponse,
+ KeysResourceWithStreamingResponse,
+ AsyncKeysResourceWithStreamingResponse,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["AnthropicResource", "AsyncAnthropicResource"]
+
+
+class AnthropicResource(SyncAPIResource):
+ @cached_property
+ def keys(self) -> KeysResource:
+ return KeysResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AnthropicResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AnthropicResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AnthropicResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AnthropicResourceWithStreamingResponse(self)
+
+
+class AsyncAnthropicResource(AsyncAPIResource):
+ @cached_property
+ def keys(self) -> AsyncKeysResource:
+ return AsyncKeysResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncAnthropicResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncAnthropicResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncAnthropicResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncAnthropicResourceWithStreamingResponse(self)
+
+
+class AnthropicResourceWithRawResponse:
+ def __init__(self, anthropic: AnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ @cached_property
+ def keys(self) -> KeysResourceWithRawResponse:
+ return KeysResourceWithRawResponse(self._anthropic.keys)
+
+
+class AsyncAnthropicResourceWithRawResponse:
+ def __init__(self, anthropic: AsyncAnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ @cached_property
+ def keys(self) -> AsyncKeysResourceWithRawResponse:
+ return AsyncKeysResourceWithRawResponse(self._anthropic.keys)
+
+
+class AnthropicResourceWithStreamingResponse:
+ def __init__(self, anthropic: AnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ @cached_property
+ def keys(self) -> KeysResourceWithStreamingResponse:
+ return KeysResourceWithStreamingResponse(self._anthropic.keys)
+
+
+class AsyncAnthropicResourceWithStreamingResponse:
+ def __init__(self, anthropic: AsyncAnthropicResource) -> None:
+ self._anthropic = anthropic
+
+ @cached_property
+ def keys(self) -> AsyncKeysResourceWithStreamingResponse:
+ return AsyncKeysResourceWithStreamingResponse(self._anthropic.keys)
diff --git a/src/do_gradientai/resources/providers/anthropic/keys.py b/src/do_gradientai/resources/providers/anthropic/keys.py
new file mode 100644
index 00000000..d1a33290
--- /dev/null
+++ b/src/do_gradientai/resources/providers/anthropic/keys.py
@@ -0,0 +1,686 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.providers.anthropic import key_list_params, key_create_params, key_update_params, key_list_agents_params
+from ....types.providers.anthropic.key_list_response import KeyListResponse
+from ....types.providers.anthropic.key_create_response import KeyCreateResponse
+from ....types.providers.anthropic.key_delete_response import KeyDeleteResponse
+from ....types.providers.anthropic.key_update_response import KeyUpdateResponse
+from ....types.providers.anthropic.key_retrieve_response import KeyRetrieveResponse
+from ....types.providers.anthropic.key_list_agents_response import KeyListAgentsResponse
+
+__all__ = ["KeysResource", "AsyncKeysResource"]
+
+
class KeysResource(SyncAPIResource):
    """Synchronous resource for Anthropic provider API keys.

    Covers create/retrieve/update/list/delete plus listing the agents attached
    to a key, all under `/v2/gen-ai/anthropic/keys` on the DigitalOcean GenAI API.
    """

    @cached_property
    def with_raw_response(self) -> KeysResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
        """
        return KeysResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> KeysResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
        """
        return KeysResourceWithStreamingResponse(self)

    def create(
        self,
        *,
        api_key: str | NotGiven = NOT_GIVEN,
        name: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyCreateResponse:
        """
        To create an Anthropic API key, send a POST request to
        `/v2/gen-ai/anthropic/keys`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._post(
            # Use the relative path when the client's base URL was overridden,
            # otherwise hit the absolute production endpoint.
            "/v2/gen-ai/anthropic/keys"
            if self._client._base_url_overridden
            else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
            body=maybe_transform(
                {
                    "api_key": api_key,
                    "name": name,
                },
                key_create_params.KeyCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KeyCreateResponse,
        )

    def retrieve(
        self,
        api_key_uuid: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyRetrieveResponse:
        """
        To retrieve details of an Anthropic API key, send a GET request to
        `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Fail fast on an empty path parameter rather than issuing a malformed request.
        if not api_key_uuid:
            raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
        return self._get(
            f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KeyRetrieveResponse,
        )

    def update(
        self,
        path_api_key_uuid: str,
        *,
        api_key: str | NotGiven = NOT_GIVEN,
        body_api_key_uuid: str | NotGiven = NOT_GIVEN,
        name: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyUpdateResponse:
        """
        To update an Anthropic API key, send a PUT request to
        `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # `path_api_key_uuid` selects the key in the URL; `body_api_key_uuid` is the
        # (separately named) uuid field inside the request body.
        if not path_api_key_uuid:
            raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
        return self._put(
            f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}",
            body=maybe_transform(
                {
                    "api_key": api_key,
                    "body_api_key_uuid": body_api_key_uuid,
                    "name": name,
                },
                key_update_params.KeyUpdateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KeyUpdateResponse,
        )

    def list(
        self,
        *,
        page: int | NotGiven = NOT_GIVEN,
        per_page: int | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyListResponse:
        """
        To list all Anthropic API keys, send a GET request to
        `/v2/gen-ai/anthropic/keys`.

        Args:
          page: page number.

          per_page: items per page.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get(
            "/v2/gen-ai/anthropic/keys"
            if self._client._base_url_overridden
            else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                # Pagination parameters travel as query-string values.
                query=maybe_transform(
                    {
                        "page": page,
                        "per_page": per_page,
                    },
                    key_list_params.KeyListParams,
                ),
            ),
            cast_to=KeyListResponse,
        )

    def delete(
        self,
        api_key_uuid: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyDeleteResponse:
        """
        To delete an Anthropic API key, send a DELETE request to
        `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not api_key_uuid:
            raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
        return self._delete(
            f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KeyDeleteResponse,
        )

    def list_agents(
        self,
        uuid: str,
        *,
        page: int | NotGiven = NOT_GIVEN,
        per_page: int | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyListAgentsResponse:
        """
        List Agents by Anthropic Key.

        Args:
          page: page number.

          per_page: items per page.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not uuid:
            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
        return self._get(
            f"/v2/gen-ai/anthropic/keys/{uuid}/agents"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "page": page,
                        "per_page": per_page,
                    },
                    key_list_agents_params.KeyListAgentsParams,
                ),
            ),
            cast_to=KeyListAgentsResponse,
        )
+
+
class AsyncKeysResource(AsyncAPIResource):
    """Asynchronous counterpart of :class:`KeysResource` for Anthropic provider API keys.

    Mirrors the synchronous surface (create/retrieve/update/list/delete/list_agents)
    using awaitable requests against `/v2/gen-ai/anthropic/keys`.
    """

    @cached_property
    def with_raw_response(self) -> AsyncKeysResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncKeysResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
        """
        return AsyncKeysResourceWithStreamingResponse(self)

    async def create(
        self,
        *,
        api_key: str | NotGiven = NOT_GIVEN,
        name: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyCreateResponse:
        """
        To create an Anthropic API key, send a POST request to
        `/v2/gen-ai/anthropic/keys`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._post(
            # Use the relative path when the client's base URL was overridden,
            # otherwise hit the absolute production endpoint.
            "/v2/gen-ai/anthropic/keys"
            if self._client._base_url_overridden
            else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
            body=await async_maybe_transform(
                {
                    "api_key": api_key,
                    "name": name,
                },
                key_create_params.KeyCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KeyCreateResponse,
        )

    async def retrieve(
        self,
        api_key_uuid: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyRetrieveResponse:
        """
        To retrieve details of an Anthropic API key, send a GET request to
        `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Fail fast on an empty path parameter rather than issuing a malformed request.
        if not api_key_uuid:
            raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
        return await self._get(
            f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KeyRetrieveResponse,
        )

    async def update(
        self,
        path_api_key_uuid: str,
        *,
        api_key: str | NotGiven = NOT_GIVEN,
        body_api_key_uuid: str | NotGiven = NOT_GIVEN,
        name: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyUpdateResponse:
        """
        To update an Anthropic API key, send a PUT request to
        `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # `path_api_key_uuid` selects the key in the URL; `body_api_key_uuid` is the
        # (separately named) uuid field inside the request body.
        if not path_api_key_uuid:
            raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
        return await self._put(
            f"/v2/gen-ai/anthropic/keys/{path_api_key_uuid}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{path_api_key_uuid}",
            body=await async_maybe_transform(
                {
                    "api_key": api_key,
                    "body_api_key_uuid": body_api_key_uuid,
                    "name": name,
                },
                key_update_params.KeyUpdateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KeyUpdateResponse,
        )

    async def list(
        self,
        *,
        page: int | NotGiven = NOT_GIVEN,
        per_page: int | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyListResponse:
        """
        To list all Anthropic API keys, send a GET request to
        `/v2/gen-ai/anthropic/keys`.

        Args:
          page: page number.

          per_page: items per page.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._get(
            "/v2/gen-ai/anthropic/keys"
            if self._client._base_url_overridden
            else "https://api.digitalocean.com/v2/gen-ai/anthropic/keys",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                # Pagination parameters travel as query-string values.
                query=await async_maybe_transform(
                    {
                        "page": page,
                        "per_page": per_page,
                    },
                    key_list_params.KeyListParams,
                ),
            ),
            cast_to=KeyListResponse,
        )

    async def delete(
        self,
        api_key_uuid: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyDeleteResponse:
        """
        To delete an Anthropic API key, send a DELETE request to
        `/v2/gen-ai/anthropic/keys/{api_key_uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not api_key_uuid:
            raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
        return await self._delete(
            f"/v2/gen-ai/anthropic/keys/{api_key_uuid}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{api_key_uuid}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KeyDeleteResponse,
        )

    async def list_agents(
        self,
        uuid: str,
        *,
        page: int | NotGiven = NOT_GIVEN,
        per_page: int | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyListAgentsResponse:
        """
        List Agents by Anthropic Key.

        Args:
          page: page number.

          per_page: items per page.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not uuid:
            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
        return await self._get(
            f"/v2/gen-ai/anthropic/keys/{uuid}/agents"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/anthropic/keys/{uuid}/agents",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=await async_maybe_transform(
                    {
                        "page": page,
                        "per_page": per_page,
                    },
                    key_list_agents_params.KeyListAgentsParams,
                ),
            ),
            cast_to=KeyListAgentsResponse,
        )
+
+
class KeysResourceWithRawResponse:
    """Raw-response view of :class:`KeysResource`: each method returns the raw HTTP response."""

    def __init__(self, keys: KeysResource) -> None:
        self._keys = keys

        # Each bound method is wrapped so calls return the raw response object
        # instead of the parsed model.
        self.create = to_raw_response_wrapper(
            keys.create,
        )
        self.retrieve = to_raw_response_wrapper(
            keys.retrieve,
        )
        self.update = to_raw_response_wrapper(
            keys.update,
        )
        self.list = to_raw_response_wrapper(
            keys.list,
        )
        self.delete = to_raw_response_wrapper(
            keys.delete,
        )
        self.list_agents = to_raw_response_wrapper(
            keys.list_agents,
        )
+
+
class AsyncKeysResourceWithRawResponse:
    """Raw-response view of :class:`AsyncKeysResource`: each method returns the raw HTTP response."""

    def __init__(self, keys: AsyncKeysResource) -> None:
        self._keys = keys

        # Each bound coroutine method is wrapped so awaited calls return the raw
        # response object instead of the parsed model.
        self.create = async_to_raw_response_wrapper(
            keys.create,
        )
        self.retrieve = async_to_raw_response_wrapper(
            keys.retrieve,
        )
        self.update = async_to_raw_response_wrapper(
            keys.update,
        )
        self.list = async_to_raw_response_wrapper(
            keys.list,
        )
        self.delete = async_to_raw_response_wrapper(
            keys.delete,
        )
        self.list_agents = async_to_raw_response_wrapper(
            keys.list_agents,
        )
+
+
class KeysResourceWithStreamingResponse:
    """Streaming view of :class:`KeysResource`: response bodies are not read eagerly."""

    def __init__(self, keys: KeysResource) -> None:
        self._keys = keys

        # Each bound method is wrapped so the response body can be streamed
        # instead of being read into memory up front.
        self.create = to_streamed_response_wrapper(
            keys.create,
        )
        self.retrieve = to_streamed_response_wrapper(
            keys.retrieve,
        )
        self.update = to_streamed_response_wrapper(
            keys.update,
        )
        self.list = to_streamed_response_wrapper(
            keys.list,
        )
        self.delete = to_streamed_response_wrapper(
            keys.delete,
        )
        self.list_agents = to_streamed_response_wrapper(
            keys.list_agents,
        )
+
+
class AsyncKeysResourceWithStreamingResponse:
    """Streaming view of :class:`AsyncKeysResource`: response bodies are not read eagerly."""

    def __init__(self, keys: AsyncKeysResource) -> None:
        self._keys = keys

        # Each bound coroutine method is wrapped so the response body can be
        # streamed instead of being read into memory up front.
        self.create = async_to_streamed_response_wrapper(
            keys.create,
        )
        self.retrieve = async_to_streamed_response_wrapper(
            keys.retrieve,
        )
        self.update = async_to_streamed_response_wrapper(
            keys.update,
        )
        self.list = async_to_streamed_response_wrapper(
            keys.list,
        )
        self.delete = async_to_streamed_response_wrapper(
            keys.delete,
        )
        self.list_agents = async_to_streamed_response_wrapper(
            keys.list_agents,
        )
diff --git a/src/do_gradientai/resources/providers/openai/__init__.py b/src/do_gradientai/resources/providers/openai/__init__.py
new file mode 100644
index 00000000..66d8ca7a
--- /dev/null
+++ b/src/do_gradientai/resources/providers/openai/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .keys import (
+ KeysResource,
+ AsyncKeysResource,
+ KeysResourceWithRawResponse,
+ AsyncKeysResourceWithRawResponse,
+ KeysResourceWithStreamingResponse,
+ AsyncKeysResourceWithStreamingResponse,
+)
+from .openai import (
+ OpenAIResource,
+ AsyncOpenAIResource,
+ OpenAIResourceWithRawResponse,
+ AsyncOpenAIResourceWithRawResponse,
+ OpenAIResourceWithStreamingResponse,
+ AsyncOpenAIResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "KeysResource",
+ "AsyncKeysResource",
+ "KeysResourceWithRawResponse",
+ "AsyncKeysResourceWithRawResponse",
+ "KeysResourceWithStreamingResponse",
+ "AsyncKeysResourceWithStreamingResponse",
+ "OpenAIResource",
+ "AsyncOpenAIResource",
+ "OpenAIResourceWithRawResponse",
+ "AsyncOpenAIResourceWithRawResponse",
+ "OpenAIResourceWithStreamingResponse",
+ "AsyncOpenAIResourceWithStreamingResponse",
+]
diff --git a/src/do_gradientai/resources/providers/openai/keys.py b/src/do_gradientai/resources/providers/openai/keys.py
new file mode 100644
index 00000000..01cfee75
--- /dev/null
+++ b/src/do_gradientai/resources/providers/openai/keys.py
@@ -0,0 +1,682 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.providers.openai import key_list_params, key_create_params, key_update_params, key_retrieve_agents_params
+from ....types.providers.openai.key_list_response import KeyListResponse
+from ....types.providers.openai.key_create_response import KeyCreateResponse
+from ....types.providers.openai.key_delete_response import KeyDeleteResponse
+from ....types.providers.openai.key_update_response import KeyUpdateResponse
+from ....types.providers.openai.key_retrieve_response import KeyRetrieveResponse
+from ....types.providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse
+
+__all__ = ["KeysResource", "AsyncKeysResource"]
+
+
class KeysResource(SyncAPIResource):
    """Synchronous resource for OpenAI provider API keys.

    Covers create/retrieve/update/list/delete plus listing the agents attached
    to a key, all under `/v2/gen-ai/openai/keys` on the DigitalOcean GenAI API.
    """

    @cached_property
    def with_raw_response(self) -> KeysResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
        """
        return KeysResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> KeysResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
        """
        return KeysResourceWithStreamingResponse(self)

    def create(
        self,
        *,
        api_key: str | NotGiven = NOT_GIVEN,
        name: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyCreateResponse:
        """
        To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._post(
            # Use the relative path when the client's base URL was overridden,
            # otherwise hit the absolute production endpoint.
            "/v2/gen-ai/openai/keys"
            if self._client._base_url_overridden
            else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
            body=maybe_transform(
                {
                    "api_key": api_key,
                    "name": name,
                },
                key_create_params.KeyCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KeyCreateResponse,
        )

    def retrieve(
        self,
        api_key_uuid: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyRetrieveResponse:
        """
        To retrieve details of an OpenAI API key, send a GET request to
        `/v2/gen-ai/openai/keys/{api_key_uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Fail fast on an empty path parameter rather than issuing a malformed request.
        if not api_key_uuid:
            raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
        return self._get(
            f"/v2/gen-ai/openai/keys/{api_key_uuid}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KeyRetrieveResponse,
        )

    def update(
        self,
        path_api_key_uuid: str,
        *,
        api_key: str | NotGiven = NOT_GIVEN,
        body_api_key_uuid: str | NotGiven = NOT_GIVEN,
        name: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyUpdateResponse:
        """
        To update an OpenAI API key, send a PUT request to
        `/v2/gen-ai/openai/keys/{api_key_uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # `path_api_key_uuid` selects the key in the URL; `body_api_key_uuid` is the
        # (separately named) uuid field inside the request body.
        if not path_api_key_uuid:
            raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
        return self._put(
            f"/v2/gen-ai/openai/keys/{path_api_key_uuid}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}",
            body=maybe_transform(
                {
                    "api_key": api_key,
                    "body_api_key_uuid": body_api_key_uuid,
                    "name": name,
                },
                key_update_params.KeyUpdateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KeyUpdateResponse,
        )

    def list(
        self,
        *,
        page: int | NotGiven = NOT_GIVEN,
        per_page: int | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyListResponse:
        """
        To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`.

        Args:
          page: page number.

          per_page: items per page.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get(
            "/v2/gen-ai/openai/keys"
            if self._client._base_url_overridden
            else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                # Pagination parameters travel as query-string values.
                query=maybe_transform(
                    {
                        "page": page,
                        "per_page": per_page,
                    },
                    key_list_params.KeyListParams,
                ),
            ),
            cast_to=KeyListResponse,
        )

    def delete(
        self,
        api_key_uuid: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyDeleteResponse:
        """
        To delete an OpenAI API key, send a DELETE request to
        `/v2/gen-ai/openai/keys/{api_key_uuid}`.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not api_key_uuid:
            raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
        return self._delete(
            f"/v2/gen-ai/openai/keys/{api_key_uuid}"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=KeyDeleteResponse,
        )

    def retrieve_agents(
        self,
        uuid: str,
        *,
        page: int | NotGiven = NOT_GIVEN,
        per_page: int | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> KeyRetrieveAgentsResponse:
        """
        List Agents by OpenAI Key.

        Args:
          page: page number.

          per_page: items per page.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not uuid:
            raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
        return self._get(
            f"/v2/gen-ai/openai/keys/{uuid}/agents"
            if self._client._base_url_overridden
            else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "page": page,
                        "per_page": per_page,
                    },
                    key_retrieve_agents_params.KeyRetrieveAgentsParams,
                ),
            ),
            cast_to=KeyRetrieveAgentsResponse,
        )
+
+
+class AsyncKeysResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncKeysResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncKeysResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncKeysResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyCreateResponse:
+ """
+ To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ body=await async_maybe_transform(
+ {
+ "api_key": api_key,
+ "name": name,
+ },
+ key_create_params.KeyCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyRetrieveResponse:
+ """
+ To retrieve details of an OpenAI API key, send a GET request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyRetrieveResponse,
+ )
+
+ async def update(
+ self,
+ path_api_key_uuid: str,
+ *,
+ api_key: str | NotGiven = NOT_GIVEN,
+ body_api_key_uuid: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyUpdateResponse:
+ """
+ To update an OpenAI API key, send a PUT request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `path_api_key_uuid` but received {path_api_key_uuid!r}")
+ return await self._put(
+ f"/v2/gen-ai/openai/keys/{path_api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{path_api_key_uuid}",
+ body=await async_maybe_transform(
+ {
+ "api_key": api_key,
+ "body_api_key_uuid": body_api_key_uuid,
+ "name": name,
+ },
+ key_update_params.KeyUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyUpdateResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyListResponse:
+ """
+ To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`.
+
+ Args:
+ page: page number.
+
+ per_page: items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/gen-ai/openai/keys"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/openai/keys",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ key_list_params.KeyListParams,
+ ),
+ ),
+ cast_to=KeyListResponse,
+ )
+
+ async def delete(
+ self,
+ api_key_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyDeleteResponse:
+ """
+ To delete an OpenAI API key, send a DELETE request to
+ `/v2/gen-ai/openai/keys/{api_key_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not api_key_uuid:
+ raise ValueError(f"Expected a non-empty value for `api_key_uuid` but received {api_key_uuid!r}")
+ return await self._delete(
+ f"/v2/gen-ai/openai/keys/{api_key_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{api_key_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=KeyDeleteResponse,
+ )
+
+ async def retrieve_agents(
+ self,
+ uuid: str,
+ *,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> KeyRetrieveAgentsResponse:
+ """
+ List Agents by OpenAI Key.
+
+ Args:
+ page: page number.
+
+ per_page: items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/openai/keys/{uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/openai/keys/{uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "page": page,
+ "per_page": per_page,
+ },
+ key_retrieve_agents_params.KeyRetrieveAgentsParams,
+ ),
+ ),
+ cast_to=KeyRetrieveAgentsResponse,
+ )
+
+
+class KeysResourceWithRawResponse:
+ def __init__(self, keys: KeysResource) -> None:
+ self._keys = keys
+
+ self.create = to_raw_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ keys.update,
+ )
+ self.list = to_raw_response_wrapper(
+ keys.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ keys.delete,
+ )
+ self.retrieve_agents = to_raw_response_wrapper(
+ keys.retrieve_agents,
+ )
+
+
+class AsyncKeysResourceWithRawResponse:
+ def __init__(self, keys: AsyncKeysResource) -> None:
+ self._keys = keys
+
+ self.create = async_to_raw_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ keys.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ keys.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ keys.delete,
+ )
+ self.retrieve_agents = async_to_raw_response_wrapper(
+ keys.retrieve_agents,
+ )
+
+
+class KeysResourceWithStreamingResponse:
+ def __init__(self, keys: KeysResource) -> None:
+ self._keys = keys
+
+ self.create = to_streamed_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ keys.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ keys.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ keys.delete,
+ )
+ self.retrieve_agents = to_streamed_response_wrapper(
+ keys.retrieve_agents,
+ )
+
+
+class AsyncKeysResourceWithStreamingResponse:
+ def __init__(self, keys: AsyncKeysResource) -> None:
+ self._keys = keys
+
+ self.create = async_to_streamed_response_wrapper(
+ keys.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ keys.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ keys.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ keys.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ keys.delete,
+ )
+ self.retrieve_agents = async_to_streamed_response_wrapper(
+ keys.retrieve_agents,
+ )
diff --git a/src/do_gradientai/resources/providers/openai/openai.py b/src/do_gradientai/resources/providers/openai/openai.py
new file mode 100644
index 00000000..b02dc2e1
--- /dev/null
+++ b/src/do_gradientai/resources/providers/openai/openai.py
@@ -0,0 +1,102 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .keys import (
+ KeysResource,
+ AsyncKeysResource,
+ KeysResourceWithRawResponse,
+ AsyncKeysResourceWithRawResponse,
+ KeysResourceWithStreamingResponse,
+ AsyncKeysResourceWithStreamingResponse,
+)
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["OpenAIResource", "AsyncOpenAIResource"]
+
+
+class OpenAIResource(SyncAPIResource):
+ @cached_property
+ def keys(self) -> KeysResource:
+ return KeysResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> OpenAIResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return OpenAIResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> OpenAIResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return OpenAIResourceWithStreamingResponse(self)
+
+
+class AsyncOpenAIResource(AsyncAPIResource):
+ @cached_property
+ def keys(self) -> AsyncKeysResource:
+ return AsyncKeysResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncOpenAIResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncOpenAIResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncOpenAIResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncOpenAIResourceWithStreamingResponse(self)
+
+
+class OpenAIResourceWithRawResponse:
+ def __init__(self, openai: OpenAIResource) -> None:
+ self._openai = openai
+
+ @cached_property
+ def keys(self) -> KeysResourceWithRawResponse:
+ return KeysResourceWithRawResponse(self._openai.keys)
+
+
+class AsyncOpenAIResourceWithRawResponse:
+ def __init__(self, openai: AsyncOpenAIResource) -> None:
+ self._openai = openai
+
+ @cached_property
+ def keys(self) -> AsyncKeysResourceWithRawResponse:
+ return AsyncKeysResourceWithRawResponse(self._openai.keys)
+
+
+class OpenAIResourceWithStreamingResponse:
+ def __init__(self, openai: OpenAIResource) -> None:
+ self._openai = openai
+
+ @cached_property
+ def keys(self) -> KeysResourceWithStreamingResponse:
+ return KeysResourceWithStreamingResponse(self._openai.keys)
+
+
+class AsyncOpenAIResourceWithStreamingResponse:
+ def __init__(self, openai: AsyncOpenAIResource) -> None:
+ self._openai = openai
+
+ @cached_property
+ def keys(self) -> AsyncKeysResourceWithStreamingResponse:
+ return AsyncKeysResourceWithStreamingResponse(self._openai.keys)
diff --git a/src/do_gradientai/resources/providers/providers.py b/src/do_gradientai/resources/providers/providers.py
new file mode 100644
index 00000000..ef942f73
--- /dev/null
+++ b/src/do_gradientai/resources/providers/providers.py
@@ -0,0 +1,134 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from .openai.openai import (
+ OpenAIResource,
+ AsyncOpenAIResource,
+ OpenAIResourceWithRawResponse,
+ AsyncOpenAIResourceWithRawResponse,
+ OpenAIResourceWithStreamingResponse,
+ AsyncOpenAIResourceWithStreamingResponse,
+)
+from .anthropic.anthropic import (
+ AnthropicResource,
+ AsyncAnthropicResource,
+ AnthropicResourceWithRawResponse,
+ AsyncAnthropicResourceWithRawResponse,
+ AnthropicResourceWithStreamingResponse,
+ AsyncAnthropicResourceWithStreamingResponse,
+)
+
+__all__ = ["ProvidersResource", "AsyncProvidersResource"]
+
+
+class ProvidersResource(SyncAPIResource):
+ @cached_property
+ def anthropic(self) -> AnthropicResource:
+ return AnthropicResource(self._client)
+
+ @cached_property
+ def openai(self) -> OpenAIResource:
+ return OpenAIResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> ProvidersResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return ProvidersResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return ProvidersResourceWithStreamingResponse(self)
+
+
+class AsyncProvidersResource(AsyncAPIResource):
+ @cached_property
+ def anthropic(self) -> AsyncAnthropicResource:
+ return AsyncAnthropicResource(self._client)
+
+ @cached_property
+ def openai(self) -> AsyncOpenAIResource:
+ return AsyncOpenAIResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncProvidersResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncProvidersResourceWithStreamingResponse(self)
+
+
+class ProvidersResourceWithRawResponse:
+ def __init__(self, providers: ProvidersResource) -> None:
+ self._providers = providers
+
+ @cached_property
+ def anthropic(self) -> AnthropicResourceWithRawResponse:
+ return AnthropicResourceWithRawResponse(self._providers.anthropic)
+
+ @cached_property
+ def openai(self) -> OpenAIResourceWithRawResponse:
+ return OpenAIResourceWithRawResponse(self._providers.openai)
+
+
+class AsyncProvidersResourceWithRawResponse:
+ def __init__(self, providers: AsyncProvidersResource) -> None:
+ self._providers = providers
+
+ @cached_property
+ def anthropic(self) -> AsyncAnthropicResourceWithRawResponse:
+ return AsyncAnthropicResourceWithRawResponse(self._providers.anthropic)
+
+ @cached_property
+ def openai(self) -> AsyncOpenAIResourceWithRawResponse:
+ return AsyncOpenAIResourceWithRawResponse(self._providers.openai)
+
+
+class ProvidersResourceWithStreamingResponse:
+ def __init__(self, providers: ProvidersResource) -> None:
+ self._providers = providers
+
+ @cached_property
+ def anthropic(self) -> AnthropicResourceWithStreamingResponse:
+ return AnthropicResourceWithStreamingResponse(self._providers.anthropic)
+
+ @cached_property
+ def openai(self) -> OpenAIResourceWithStreamingResponse:
+ return OpenAIResourceWithStreamingResponse(self._providers.openai)
+
+
+class AsyncProvidersResourceWithStreamingResponse:
+ def __init__(self, providers: AsyncProvidersResource) -> None:
+ self._providers = providers
+
+ @cached_property
+ def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse:
+ return AsyncAnthropicResourceWithStreamingResponse(self._providers.anthropic)
+
+ @cached_property
+ def openai(self) -> AsyncOpenAIResourceWithStreamingResponse:
+ return AsyncOpenAIResourceWithStreamingResponse(self._providers.openai)
diff --git a/src/do_gradientai/types/models/__init__.py b/src/do_gradientai/types/providers/__init__.py
similarity index 100%
rename from src/do_gradientai/types/models/__init__.py
rename to src/do_gradientai/types/providers/__init__.py
diff --git a/src/do_gradientai/types/providers/anthropic/__init__.py b/src/do_gradientai/types/providers/anthropic/__init__.py
new file mode 100644
index 00000000..eb47e709
--- /dev/null
+++ b/src/do_gradientai/types/providers/anthropic/__init__.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .key_list_params import KeyListParams as KeyListParams
+from .key_create_params import KeyCreateParams as KeyCreateParams
+from .key_list_response import KeyListResponse as KeyListResponse
+from .key_update_params import KeyUpdateParams as KeyUpdateParams
+from .key_create_response import KeyCreateResponse as KeyCreateResponse
+from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse
+from .key_update_response import KeyUpdateResponse as KeyUpdateResponse
+from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse
+from .key_list_agents_params import KeyListAgentsParams as KeyListAgentsParams
+from .key_list_agents_response import KeyListAgentsResponse as KeyListAgentsResponse
diff --git a/src/do_gradientai/types/providers/anthropic/key_create_params.py b/src/do_gradientai/types/providers/anthropic/key_create_params.py
new file mode 100644
index 00000000..389f167c
--- /dev/null
+++ b/src/do_gradientai/types/providers/anthropic/key_create_params.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyCreateParams"]
+
+
+class KeyCreateParams(TypedDict, total=False):
+ api_key: str
+
+ name: str
diff --git a/src/do_gradientai/types/providers/anthropic/key_create_response.py b/src/do_gradientai/types/providers/anthropic/key_create_response.py
new file mode 100644
index 00000000..a032810c
--- /dev/null
+++ b/src/do_gradientai/types/providers/anthropic/key_create_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyCreateResponse"]
+
+
+class KeyCreateResponse(BaseModel):
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
diff --git a/src/do_gradientai/types/providers/anthropic/key_delete_response.py b/src/do_gradientai/types/providers/anthropic/key_delete_response.py
new file mode 100644
index 00000000..2afe2dda
--- /dev/null
+++ b/src/do_gradientai/types/providers/anthropic/key_delete_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyDeleteResponse"]
+
+
+class KeyDeleteResponse(BaseModel):
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
diff --git a/src/do_gradientai/types/providers/anthropic/key_list_agents_params.py b/src/do_gradientai/types/providers/anthropic/key_list_agents_params.py
new file mode 100644
index 00000000..ebbc3b7e
--- /dev/null
+++ b/src/do_gradientai/types/providers/anthropic/key_list_agents_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyListAgentsParams"]
+
+
+class KeyListAgentsParams(TypedDict, total=False):
+ page: int
+ """page number."""
+
+ per_page: int
+ """items per page."""
diff --git a/src/do_gradientai/types/providers/anthropic/key_list_agents_response.py b/src/do_gradientai/types/providers/anthropic/key_list_agents_response.py
new file mode 100644
index 00000000..ba6ca946
--- /dev/null
+++ b/src/do_gradientai/types/providers/anthropic/key_list_agents_response.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...agents.api_meta import APIMeta
+from ...agents.api_links import APILinks
+
+__all__ = ["KeyListAgentsResponse"]
+
+
+class KeyListAgentsResponse(BaseModel):
+ agents: Optional[List["APIAgent"]] = None
+
+ links: Optional[APILinks] = None
+
+ meta: Optional[APIMeta] = None
+
+
+from ...api_agent import APIAgent
diff --git a/src/do_gradientai/types/providers/anthropic/key_list_params.py b/src/do_gradientai/types/providers/anthropic/key_list_params.py
new file mode 100644
index 00000000..a11458ad
--- /dev/null
+++ b/src/do_gradientai/types/providers/anthropic/key_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyListParams"]
+
+
+class KeyListParams(TypedDict, total=False):
+ page: int
+ """page number."""
+
+ per_page: int
+ """items per page."""
diff --git a/src/do_gradientai/types/providers/anthropic/key_list_response.py b/src/do_gradientai/types/providers/anthropic/key_list_response.py
new file mode 100644
index 00000000..d0b84e96
--- /dev/null
+++ b/src/do_gradientai/types/providers/anthropic/key_list_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...agents.api_meta import APIMeta
+from ...agents.api_links import APILinks
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyListResponse"]
+
+
+class KeyListResponse(BaseModel):
+ api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None
+
+ links: Optional[APILinks] = None
+
+ meta: Optional[APIMeta] = None
diff --git a/src/do_gradientai/types/providers/anthropic/key_retrieve_response.py b/src/do_gradientai/types/providers/anthropic/key_retrieve_response.py
new file mode 100644
index 00000000..b8361fc2
--- /dev/null
+++ b/src/do_gradientai/types/providers/anthropic/key_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyRetrieveResponse"]
+
+
+class KeyRetrieveResponse(BaseModel):
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
diff --git a/src/do_gradientai/types/providers/anthropic/key_update_params.py b/src/do_gradientai/types/providers/anthropic/key_update_params.py
new file mode 100644
index 00000000..c07d7f66
--- /dev/null
+++ b/src/do_gradientai/types/providers/anthropic/key_update_params.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ...._utils import PropertyInfo
+
+__all__ = ["KeyUpdateParams"]
+
+
+class KeyUpdateParams(TypedDict, total=False):
+ api_key: str
+
+ body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
+
+ name: str
diff --git a/src/do_gradientai/types/providers/anthropic/key_update_response.py b/src/do_gradientai/types/providers/anthropic/key_update_response.py
new file mode 100644
index 00000000..b04277a6
--- /dev/null
+++ b/src/do_gradientai/types/providers/anthropic/key_update_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
+
+__all__ = ["KeyUpdateResponse"]
+
+
+class KeyUpdateResponse(BaseModel):
+ api_key_info: Optional[APIAnthropicAPIKeyInfo] = None
diff --git a/src/do_gradientai/types/providers/openai/__init__.py b/src/do_gradientai/types/providers/openai/__init__.py
new file mode 100644
index 00000000..70abf332
--- /dev/null
+++ b/src/do_gradientai/types/providers/openai/__init__.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .key_list_params import KeyListParams as KeyListParams
+from .key_create_params import KeyCreateParams as KeyCreateParams
+from .key_list_response import KeyListResponse as KeyListResponse
+from .key_update_params import KeyUpdateParams as KeyUpdateParams
+from .key_create_response import KeyCreateResponse as KeyCreateResponse
+from .key_delete_response import KeyDeleteResponse as KeyDeleteResponse
+from .key_update_response import KeyUpdateResponse as KeyUpdateResponse
+from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse
+from .key_retrieve_agents_params import KeyRetrieveAgentsParams as KeyRetrieveAgentsParams
+from .key_retrieve_agents_response import KeyRetrieveAgentsResponse as KeyRetrieveAgentsResponse
diff --git a/src/do_gradientai/types/providers/openai/key_create_params.py b/src/do_gradientai/types/providers/openai/key_create_params.py
new file mode 100644
index 00000000..389f167c
--- /dev/null
+++ b/src/do_gradientai/types/providers/openai/key_create_params.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyCreateParams"]
+
+
+class KeyCreateParams(TypedDict, total=False):
+ api_key: str
+
+ name: str
diff --git a/src/do_gradientai/types/providers/openai/key_create_response.py b/src/do_gradientai/types/providers/openai/key_create_response.py
new file mode 100644
index 00000000..f3b4d36c
--- /dev/null
+++ b/src/do_gradientai/types/providers/openai/key_create_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyCreateResponse"]
+
+
+class KeyCreateResponse(BaseModel):
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
diff --git a/src/do_gradientai/types/providers/openai/key_delete_response.py b/src/do_gradientai/types/providers/openai/key_delete_response.py
new file mode 100644
index 00000000..0c8922bb
--- /dev/null
+++ b/src/do_gradientai/types/providers/openai/key_delete_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyDeleteResponse"]
+
+
+class KeyDeleteResponse(BaseModel):
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
diff --git a/src/do_gradientai/types/providers/openai/key_list_params.py b/src/do_gradientai/types/providers/openai/key_list_params.py
new file mode 100644
index 00000000..a11458ad
--- /dev/null
+++ b/src/do_gradientai/types/providers/openai/key_list_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyListParams"]
+
+
+class KeyListParams(TypedDict, total=False):
+ page: int
+ """page number."""
+
+ per_page: int
+ """items per page."""
diff --git a/src/do_gradientai/types/providers/openai/key_list_response.py b/src/do_gradientai/types/providers/openai/key_list_response.py
new file mode 100644
index 00000000..c263cba3
--- /dev/null
+++ b/src/do_gradientai/types/providers/openai/key_list_response.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...agents.api_meta import APIMeta
+from ...agents.api_links import APILinks
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyListResponse"]
+
+
+class KeyListResponse(BaseModel):
+ api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None
+
+ links: Optional[APILinks] = None
+
+ meta: Optional[APIMeta] = None
diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py b/src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py
new file mode 100644
index 00000000..ec745d14
--- /dev/null
+++ b/src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["KeyRetrieveAgentsParams"]
+
+
+class KeyRetrieveAgentsParams(TypedDict, total=False):
+ page: int
+ """page number."""
+
+ per_page: int
+ """items per page."""
diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py b/src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py
new file mode 100644
index 00000000..f42edea6
--- /dev/null
+++ b/src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ...agents.api_meta import APIMeta
+from ...agents.api_links import APILinks
+
+__all__ = ["KeyRetrieveAgentsResponse"]
+
+
+class KeyRetrieveAgentsResponse(BaseModel):
+ agents: Optional[List["APIAgent"]] = None
+
+ links: Optional[APILinks] = None
+
+ meta: Optional[APIMeta] = None
+
+
+from ...api_agent import APIAgent
diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_response.py b/src/do_gradientai/types/providers/openai/key_retrieve_response.py
new file mode 100644
index 00000000..7015b6f7
--- /dev/null
+++ b/src/do_gradientai/types/providers/openai/key_retrieve_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyRetrieveResponse"]
+
+
+class KeyRetrieveResponse(BaseModel):
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
diff --git a/src/do_gradientai/types/providers/openai/key_update_params.py b/src/do_gradientai/types/providers/openai/key_update_params.py
new file mode 100644
index 00000000..c07d7f66
--- /dev/null
+++ b/src/do_gradientai/types/providers/openai/key_update_params.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ...._utils import PropertyInfo
+
+__all__ = ["KeyUpdateParams"]
+
+
+class KeyUpdateParams(TypedDict, total=False):
+ api_key: str
+
+ body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")]
+
+ name: str
diff --git a/src/do_gradientai/types/providers/openai/key_update_response.py b/src/do_gradientai/types/providers/openai/key_update_response.py
new file mode 100644
index 00000000..4889f994
--- /dev/null
+++ b/src/do_gradientai/types/providers/openai/key_update_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
+
+__all__ = ["KeyUpdateResponse"]
+
+
+class KeyUpdateResponse(BaseModel):
+ api_key_info: Optional[APIOpenAIAPIKeyInfo] = None
diff --git a/tests/api_resources/models/__init__.py b/tests/api_resources/providers/__init__.py
similarity index 100%
rename from tests/api_resources/models/__init__.py
rename to tests/api_resources/providers/__init__.py
diff --git a/tests/api_resources/providers/anthropic/__init__.py b/tests/api_resources/providers/anthropic/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/providers/anthropic/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/providers/anthropic/test_keys.py b/tests/api_resources/providers/anthropic/test_keys.py
new file mode 100644
index 00000000..7aa595f7
--- /dev/null
+++ b/tests/api_resources/providers/anthropic/test_keys.py
@@ -0,0 +1,557 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.providers.anthropic import (
+ KeyListResponse,
+ KeyCreateResponse,
+ KeyDeleteResponse,
+ KeyUpdateResponse,
+ KeyRetrieveResponse,
+ KeyListAgentsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestKeys:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_create(self, client: GradientAI) -> None:
+ key = client.providers.anthropic.keys.create()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_create_with_all_params(self, client: GradientAI) -> None:
+ key = client.providers.anthropic.keys.create(
+ api_key="api_key",
+ name="name",
+ )
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_create(self, client: GradientAI) -> None:
+ response = client.providers.anthropic.keys.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_create(self, client: GradientAI) -> None:
+ with client.providers.anthropic.keys.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_retrieve(self, client: GradientAI) -> None:
+ key = client.providers.anthropic.keys.retrieve(
+ "api_key_uuid",
+ )
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_retrieve(self, client: GradientAI) -> None:
+ response = client.providers.anthropic.keys.with_raw_response.retrieve(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+ with client.providers.anthropic.keys.with_streaming_response.retrieve(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_retrieve(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.providers.anthropic.keys.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_update(self, client: GradientAI) -> None:
+ key = client.providers.anthropic.keys.update(
+ path_api_key_uuid="api_key_uuid",
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_update_with_all_params(self, client: GradientAI) -> None:
+ key = client.providers.anthropic.keys.update(
+ path_api_key_uuid="api_key_uuid",
+ api_key="api_key",
+ body_api_key_uuid="api_key_uuid",
+ name="name",
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_update(self, client: GradientAI) -> None:
+ response = client.providers.anthropic.keys.with_raw_response.update(
+ path_api_key_uuid="api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_update(self, client: GradientAI) -> None:
+ with client.providers.anthropic.keys.with_streaming_response.update(
+ path_api_key_uuid="api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_update(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ client.providers.anthropic.keys.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list(self, client: GradientAI) -> None:
+ key = client.providers.anthropic.keys.list()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_with_all_params(self, client: GradientAI) -> None:
+ key = client.providers.anthropic.keys.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_list(self, client: GradientAI) -> None:
+ response = client.providers.anthropic.keys.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_list(self, client: GradientAI) -> None:
+ with client.providers.anthropic.keys.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_delete(self, client: GradientAI) -> None:
+ key = client.providers.anthropic.keys.delete(
+ "api_key_uuid",
+ )
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_delete(self, client: GradientAI) -> None:
+ response = client.providers.anthropic.keys.with_raw_response.delete(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_delete(self, client: GradientAI) -> None:
+ with client.providers.anthropic.keys.with_streaming_response.delete(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_delete(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.providers.anthropic.keys.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_agents(self, client: GradientAI) -> None:
+ key = client.providers.anthropic.keys.list_agents(
+ uuid="uuid",
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_agents_with_all_params(self, client: GradientAI) -> None:
+ key = client.providers.anthropic.keys.list_agents(
+ uuid="uuid",
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_list_agents(self, client: GradientAI) -> None:
+ response = client.providers.anthropic.keys.with_raw_response.list_agents(
+ uuid="uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_list_agents(self, client: GradientAI) -> None:
+ with client.providers.anthropic.keys.with_streaming_response.list_agents(
+ uuid="uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_list_agents(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.providers.anthropic.keys.with_raw_response.list_agents(
+ uuid="",
+ )
+
+
+class TestAsyncKeys:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.anthropic.keys.create()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.anthropic.keys.create(
+ api_key="api_key",
+ name="name",
+ )
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.providers.anthropic.keys.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.providers.anthropic.keys.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.anthropic.keys.retrieve(
+ "api_key_uuid",
+ )
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.providers.anthropic.keys.with_raw_response.retrieve(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.providers.anthropic.keys.with_streaming_response.retrieve(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.providers.anthropic.keys.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.anthropic.keys.update(
+ path_api_key_uuid="api_key_uuid",
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.anthropic.keys.update(
+ path_api_key_uuid="api_key_uuid",
+ api_key="api_key",
+ body_api_key_uuid="api_key_uuid",
+ name="name",
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.providers.anthropic.keys.with_raw_response.update(
+ path_api_key_uuid="api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.providers.anthropic.keys.with_streaming_response.update(
+ path_api_key_uuid="api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ await async_client.providers.anthropic.keys.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.anthropic.keys.list()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.anthropic.keys.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.providers.anthropic.keys.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.providers.anthropic.keys.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.anthropic.keys.delete(
+ "api_key_uuid",
+ )
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.providers.anthropic.keys.with_raw_response.delete(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.providers.anthropic.keys.with_streaming_response.delete(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.providers.anthropic.keys.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.anthropic.keys.list_agents(
+ uuid="uuid",
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.anthropic.keys.list_agents(
+ uuid="uuid",
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.providers.anthropic.keys.with_raw_response.list_agents(
+ uuid="uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.providers.anthropic.keys.with_streaming_response.list_agents(
+ uuid="uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyListAgentsResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.providers.anthropic.keys.with_raw_response.list_agents(
+ uuid="",
+ )
diff --git a/tests/api_resources/providers/openai/__init__.py b/tests/api_resources/providers/openai/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/providers/openai/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/providers/openai/test_keys.py b/tests/api_resources/providers/openai/test_keys.py
new file mode 100644
index 00000000..714dc4bd
--- /dev/null
+++ b/tests/api_resources/providers/openai/test_keys.py
@@ -0,0 +1,557 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.providers.openai import (
+ KeyListResponse,
+ KeyCreateResponse,
+ KeyDeleteResponse,
+ KeyUpdateResponse,
+ KeyRetrieveResponse,
+ KeyRetrieveAgentsResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestKeys:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_create(self, client: GradientAI) -> None:
+ key = client.providers.openai.keys.create()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_create_with_all_params(self, client: GradientAI) -> None:
+ key = client.providers.openai.keys.create(
+ api_key="api_key",
+ name="name",
+ )
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_create(self, client: GradientAI) -> None:
+ response = client.providers.openai.keys.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_create(self, client: GradientAI) -> None:
+ with client.providers.openai.keys.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_retrieve(self, client: GradientAI) -> None:
+ key = client.providers.openai.keys.retrieve(
+ "api_key_uuid",
+ )
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_retrieve(self, client: GradientAI) -> None:
+ response = client.providers.openai.keys.with_raw_response.retrieve(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+ with client.providers.openai.keys.with_streaming_response.retrieve(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_retrieve(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.providers.openai.keys.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_update(self, client: GradientAI) -> None:
+ key = client.providers.openai.keys.update(
+ path_api_key_uuid="api_key_uuid",
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_update_with_all_params(self, client: GradientAI) -> None:
+ key = client.providers.openai.keys.update(
+ path_api_key_uuid="api_key_uuid",
+ api_key="api_key",
+ body_api_key_uuid="api_key_uuid",
+ name="name",
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_update(self, client: GradientAI) -> None:
+ response = client.providers.openai.keys.with_raw_response.update(
+ path_api_key_uuid="api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_update(self, client: GradientAI) -> None:
+ with client.providers.openai.keys.with_streaming_response.update(
+ path_api_key_uuid="api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_update(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ client.providers.openai.keys.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list(self, client: GradientAI) -> None:
+ key = client.providers.openai.keys.list()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_with_all_params(self, client: GradientAI) -> None:
+ key = client.providers.openai.keys.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_list(self, client: GradientAI) -> None:
+ response = client.providers.openai.keys.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_list(self, client: GradientAI) -> None:
+ with client.providers.openai.keys.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_delete(self, client: GradientAI) -> None:
+ key = client.providers.openai.keys.delete(
+ "api_key_uuid",
+ )
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_delete(self, client: GradientAI) -> None:
+ response = client.providers.openai.keys.with_raw_response.delete(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_delete(self, client: GradientAI) -> None:
+ with client.providers.openai.keys.with_streaming_response.delete(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_delete(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ client.providers.openai.keys.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_retrieve_agents(self, client: GradientAI) -> None:
+ key = client.providers.openai.keys.retrieve_agents(
+ uuid="uuid",
+ )
+ assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> None:
+ key = client.providers.openai.keys.retrieve_agents(
+ uuid="uuid",
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_retrieve_agents(self, client: GradientAI) -> None:
+ response = client.providers.openai.keys.with_raw_response.retrieve_agents(
+ uuid="uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = response.parse()
+ assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None:
+ with client.providers.openai.keys.with_streaming_response.retrieve_agents(
+ uuid="uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = response.parse()
+ assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_retrieve_agents(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.providers.openai.keys.with_raw_response.retrieve_agents(
+ uuid="",
+ )
+
+
+class TestAsyncKeys:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.openai.keys.create()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.openai.keys.create(
+ api_key="api_key",
+ name="name",
+ )
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.providers.openai.keys.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.providers.openai.keys.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyCreateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.openai.keys.retrieve(
+ "api_key_uuid",
+ )
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.providers.openai.keys.with_raw_response.retrieve(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.providers.openai.keys.with_streaming_response.retrieve(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyRetrieveResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.providers.openai.keys.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.openai.keys.update(
+ path_api_key_uuid="api_key_uuid",
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.openai.keys.update(
+ path_api_key_uuid="api_key_uuid",
+ api_key="api_key",
+ body_api_key_uuid="api_key_uuid",
+ name="name",
+ )
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.providers.openai.keys.with_raw_response.update(
+ path_api_key_uuid="api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.providers.openai.keys.with_streaming_response.update(
+ path_api_key_uuid="api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyUpdateResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
+ await async_client.providers.openai.keys.with_raw_response.update(
+ path_api_key_uuid="",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.openai.keys.list()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.openai.keys.list(
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.providers.openai.keys.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.providers.openai.keys.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyListResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.openai.keys.delete(
+ "api_key_uuid",
+ )
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.providers.openai.keys.with_raw_response.delete(
+ "api_key_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.providers.openai.keys.with_streaming_response.delete(
+ "api_key_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyDeleteResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
+ await async_client.providers.openai.keys.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.openai.keys.retrieve_agents(
+ uuid="uuid",
+ )
+ assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ key = await async_client.providers.openai.keys.retrieve_agents(
+ uuid="uuid",
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.providers.openai.keys.with_raw_response.retrieve_agents(
+ uuid="uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ key = await response.parse()
+ assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.providers.openai.keys.with_streaming_response.retrieve_agents(
+ uuid="uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ key = await response.parse()
+ assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.providers.openai.keys.with_raw_response.retrieve_agents(
+ uuid="",
+ )
From abe573fcc2233c7d71f0a925eea8fa9dd4d0fb91 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 05:30:27 +0000
Subject: [PATCH 09/21] feat(api): update via SDK Studio
---
.stats.yml | 2 +-
api.md | 46 +++++-----
src/do_gradientai/_client.py | 39 +--------
src/do_gradientai/resources/__init__.py | 14 ---
.../resources/knowledge_bases/__init__.py | 14 +++
.../{ => knowledge_bases}/indexing_jobs.py | 28 +++---
.../knowledge_bases/knowledge_bases.py | 32 +++++++
src/do_gradientai/types/__init__.py | 11 ---
src/do_gradientai/types/api_knowledge_base.py | 2 +-
.../types/knowledge_bases/__init__.py | 11 +++
.../{ => knowledge_bases}/api_indexing_job.py | 2 +-
.../api_knowledge_base_data_source.py | 2 +-
.../indexing_job_create_params.py | 0
.../indexing_job_create_response.py | 2 +-
.../indexing_job_list_params.py | 0
.../indexing_job_list_response.py | 6 +-
...xing_job_retrieve_data_sources_response.py | 2 +-
.../indexing_job_retrieve_response.py | 2 +-
.../indexing_job_update_cancel_params.py | 2 +-
.../indexing_job_update_cancel_response.py | 2 +-
.../test_indexing_jobs.py | 86 +++++++++----------
21 files changed, 152 insertions(+), 153 deletions(-)
rename src/do_gradientai/resources/{ => knowledge_bases}/indexing_jobs.py (95%)
rename src/do_gradientai/types/{ => knowledge_bases}/api_indexing_job.py (96%)
rename src/do_gradientai/types/{ => knowledge_bases}/indexing_job_create_params.py (100%)
rename src/do_gradientai/types/{ => knowledge_bases}/indexing_job_create_response.py (89%)
rename src/do_gradientai/types/{ => knowledge_bases}/indexing_job_list_params.py (100%)
rename src/do_gradientai/types/{ => knowledge_bases}/indexing_job_list_response.py (77%)
rename src/do_gradientai/types/{ => knowledge_bases}/indexing_job_retrieve_data_sources_response.py (97%)
rename src/do_gradientai/types/{ => knowledge_bases}/indexing_job_retrieve_response.py (89%)
rename src/do_gradientai/types/{ => knowledge_bases}/indexing_job_update_cancel_params.py (91%)
rename src/do_gradientai/types/{ => knowledge_bases}/indexing_job_update_cancel_response.py (90%)
rename tests/api_resources/{ => knowledge_bases}/test_indexing_jobs.py (80%)
diff --git a/.stats.yml b/.stats.yml
index e9d82b51..3dd4e641 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 67
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: e178baf496088c521dd245cbc46c932a
+config_hash: 2529d2f80a3d70107331426b594b7f9b
diff --git a/api.md b/api.md
index a7297098..b67c57a4 100644
--- a/api.md
+++ b/api.md
@@ -257,29 +257,6 @@ Methods:
- client.regions.list(\*\*params) -> RegionListResponse
-# IndexingJobs
-
-Types:
-
-```python
-from do_gradientai.types import (
- APIIndexingJob,
- IndexingJobCreateResponse,
- IndexingJobRetrieveResponse,
- IndexingJobListResponse,
- IndexingJobRetrieveDataSourcesResponse,
- IndexingJobUpdateCancelResponse,
-)
-```
-
-Methods:
-
-- client.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse
-- client.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse
-- client.indexing_jobs.list(\*\*params) -> IndexingJobListResponse
-- client.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse
-- client.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse
-
# KnowledgeBases
Types:
@@ -326,6 +303,29 @@ Methods:
- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse
- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse
+## IndexingJobs
+
+Types:
+
+```python
+from do_gradientai.types.knowledge_bases import (
+ APIIndexingJob,
+ IndexingJobCreateResponse,
+ IndexingJobRetrieveResponse,
+ IndexingJobListResponse,
+ IndexingJobRetrieveDataSourcesResponse,
+ IndexingJobUpdateCancelResponse,
+)
+```
+
+Methods:
+
+- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse
+- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse
+- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse
+- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse
+- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse
+
# Chat
## Completions
diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py
index 8710fe68..b8d55962 100644
--- a/src/do_gradientai/_client.py
+++ b/src/do_gradientai/_client.py
@@ -31,12 +31,11 @@
)
if TYPE_CHECKING:
- from .resources import chat, agents, models, regions, inference, providers, indexing_jobs, knowledge_bases
+ from .resources import chat, agents, models, regions, inference, providers, knowledge_bases
from .resources.models import ModelsResource, AsyncModelsResource
from .resources.regions import RegionsResource, AsyncRegionsResource
from .resources.chat.chat import ChatResource, AsyncChatResource
from .resources.agents.agents import AgentsResource, AsyncAgentsResource
- from .resources.indexing_jobs import IndexingJobsResource, AsyncIndexingJobsResource
from .resources.inference.inference import InferenceResource, AsyncInferenceResource
from .resources.providers.providers import ProvidersResource, AsyncProvidersResource
from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource
@@ -127,12 +126,6 @@ def regions(self) -> RegionsResource:
return RegionsResource(self)
- @cached_property
- def indexing_jobs(self) -> IndexingJobsResource:
- from .resources.indexing_jobs import IndexingJobsResource
-
- return IndexingJobsResource(self)
-
@cached_property
def knowledge_bases(self) -> KnowledgeBasesResource:
from .resources.knowledge_bases import KnowledgeBasesResource
@@ -346,12 +339,6 @@ def regions(self) -> AsyncRegionsResource:
return AsyncRegionsResource(self)
- @cached_property
- def indexing_jobs(self) -> AsyncIndexingJobsResource:
- from .resources.indexing_jobs import AsyncIndexingJobsResource
-
- return AsyncIndexingJobsResource(self)
-
@cached_property
def knowledge_bases(self) -> AsyncKnowledgeBasesResource:
from .resources.knowledge_bases import AsyncKnowledgeBasesResource
@@ -515,12 +502,6 @@ def regions(self) -> regions.RegionsResourceWithRawResponse:
return RegionsResourceWithRawResponse(self._client.regions)
- @cached_property
- def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithRawResponse:
- from .resources.indexing_jobs import IndexingJobsResourceWithRawResponse
-
- return IndexingJobsResourceWithRawResponse(self._client.indexing_jobs)
-
@cached_property
def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithRawResponse:
from .resources.knowledge_bases import KnowledgeBasesResourceWithRawResponse
@@ -570,12 +551,6 @@ def regions(self) -> regions.AsyncRegionsResourceWithRawResponse:
return AsyncRegionsResourceWithRawResponse(self._client.regions)
- @cached_property
- def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithRawResponse:
- from .resources.indexing_jobs import AsyncIndexingJobsResourceWithRawResponse
-
- return AsyncIndexingJobsResourceWithRawResponse(self._client.indexing_jobs)
-
@cached_property
def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithRawResponse:
from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithRawResponse
@@ -625,12 +600,6 @@ def regions(self) -> regions.RegionsResourceWithStreamingResponse:
return RegionsResourceWithStreamingResponse(self._client.regions)
- @cached_property
- def indexing_jobs(self) -> indexing_jobs.IndexingJobsResourceWithStreamingResponse:
- from .resources.indexing_jobs import IndexingJobsResourceWithStreamingResponse
-
- return IndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs)
-
@cached_property
def knowledge_bases(self) -> knowledge_bases.KnowledgeBasesResourceWithStreamingResponse:
from .resources.knowledge_bases import KnowledgeBasesResourceWithStreamingResponse
@@ -680,12 +649,6 @@ def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse:
return AsyncRegionsResourceWithStreamingResponse(self._client.regions)
- @cached_property
- def indexing_jobs(self) -> indexing_jobs.AsyncIndexingJobsResourceWithStreamingResponse:
- from .resources.indexing_jobs import AsyncIndexingJobsResourceWithStreamingResponse
-
- return AsyncIndexingJobsResourceWithStreamingResponse(self._client.indexing_jobs)
-
@cached_property
def knowledge_bases(self) -> knowledge_bases.AsyncKnowledgeBasesResourceWithStreamingResponse:
from .resources.knowledge_bases import AsyncKnowledgeBasesResourceWithStreamingResponse
diff --git a/src/do_gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py
index 1763a13e..6ad0aa32 100644
--- a/src/do_gradientai/resources/__init__.py
+++ b/src/do_gradientai/resources/__init__.py
@@ -48,14 +48,6 @@
ProvidersResourceWithStreamingResponse,
AsyncProvidersResourceWithStreamingResponse,
)
-from .indexing_jobs import (
- IndexingJobsResource,
- AsyncIndexingJobsResource,
- IndexingJobsResourceWithRawResponse,
- AsyncIndexingJobsResourceWithRawResponse,
- IndexingJobsResourceWithStreamingResponse,
- AsyncIndexingJobsResourceWithStreamingResponse,
-)
from .knowledge_bases import (
KnowledgeBasesResource,
AsyncKnowledgeBasesResource,
@@ -84,12 +76,6 @@
"AsyncRegionsResourceWithRawResponse",
"RegionsResourceWithStreamingResponse",
"AsyncRegionsResourceWithStreamingResponse",
- "IndexingJobsResource",
- "AsyncIndexingJobsResource",
- "IndexingJobsResourceWithRawResponse",
- "AsyncIndexingJobsResourceWithRawResponse",
- "IndexingJobsResourceWithStreamingResponse",
- "AsyncIndexingJobsResourceWithStreamingResponse",
"KnowledgeBasesResource",
"AsyncKnowledgeBasesResource",
"KnowledgeBasesResourceWithRawResponse",
diff --git a/src/do_gradientai/resources/knowledge_bases/__init__.py b/src/do_gradientai/resources/knowledge_bases/__init__.py
index 03d143e2..80d04328 100644
--- a/src/do_gradientai/resources/knowledge_bases/__init__.py
+++ b/src/do_gradientai/resources/knowledge_bases/__init__.py
@@ -8,6 +8,14 @@
DataSourcesResourceWithStreamingResponse,
AsyncDataSourcesResourceWithStreamingResponse,
)
+from .indexing_jobs import (
+ IndexingJobsResource,
+ AsyncIndexingJobsResource,
+ IndexingJobsResourceWithRawResponse,
+ AsyncIndexingJobsResourceWithRawResponse,
+ IndexingJobsResourceWithStreamingResponse,
+ AsyncIndexingJobsResourceWithStreamingResponse,
+)
from .knowledge_bases import (
KnowledgeBasesResource,
AsyncKnowledgeBasesResource,
@@ -24,6 +32,12 @@
"AsyncDataSourcesResourceWithRawResponse",
"DataSourcesResourceWithStreamingResponse",
"AsyncDataSourcesResourceWithStreamingResponse",
+ "IndexingJobsResource",
+ "AsyncIndexingJobsResource",
+ "IndexingJobsResourceWithRawResponse",
+ "AsyncIndexingJobsResourceWithRawResponse",
+ "IndexingJobsResourceWithStreamingResponse",
+ "AsyncIndexingJobsResourceWithStreamingResponse",
"KnowledgeBasesResource",
"AsyncKnowledgeBasesResource",
"KnowledgeBasesResourceWithRawResponse",
diff --git a/src/do_gradientai/resources/indexing_jobs.py b/src/do_gradientai/resources/knowledge_bases/indexing_jobs.py
similarity index 95%
rename from src/do_gradientai/resources/indexing_jobs.py
rename to src/do_gradientai/resources/knowledge_bases/indexing_jobs.py
index 71c59023..39151e41 100644
--- a/src/do_gradientai/resources/indexing_jobs.py
+++ b/src/do_gradientai/resources/knowledge_bases/indexing_jobs.py
@@ -6,23 +6,27 @@
import httpx
-from ..types import indexing_job_list_params, indexing_job_create_params, indexing_job_update_cancel_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
to_raw_response_wrapper,
to_streamed_response_wrapper,
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
-from .._base_client import make_request_options
-from ..types.indexing_job_list_response import IndexingJobListResponse
-from ..types.indexing_job_create_response import IndexingJobCreateResponse
-from ..types.indexing_job_retrieve_response import IndexingJobRetrieveResponse
-from ..types.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse
-from ..types.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse
+from ..._base_client import make_request_options
+from ...types.knowledge_bases import (
+ indexing_job_list_params,
+ indexing_job_create_params,
+ indexing_job_update_cancel_params,
+)
+from ...types.knowledge_bases.indexing_job_list_response import IndexingJobListResponse
+from ...types.knowledge_bases.indexing_job_create_response import IndexingJobCreateResponse
+from ...types.knowledge_bases.indexing_job_retrieve_response import IndexingJobRetrieveResponse
+from ...types.knowledge_bases.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse
+from ...types.knowledge_bases.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse
__all__ = ["IndexingJobsResource", "AsyncIndexingJobsResource"]
diff --git a/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py b/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py
index 2cab4f7b..28acdd7f 100644
--- a/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py
+++ b/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py
@@ -25,6 +25,14 @@
DataSourcesResourceWithStreamingResponse,
AsyncDataSourcesResourceWithStreamingResponse,
)
+from .indexing_jobs import (
+ IndexingJobsResource,
+ AsyncIndexingJobsResource,
+ IndexingJobsResourceWithRawResponse,
+ AsyncIndexingJobsResourceWithRawResponse,
+ IndexingJobsResourceWithStreamingResponse,
+ AsyncIndexingJobsResourceWithStreamingResponse,
+)
from ..._base_client import make_request_options
from ...types.knowledge_base_list_response import KnowledgeBaseListResponse
from ...types.knowledge_base_create_response import KnowledgeBaseCreateResponse
@@ -40,6 +48,10 @@ class KnowledgeBasesResource(SyncAPIResource):
def data_sources(self) -> DataSourcesResource:
return DataSourcesResource(self._client)
+ @cached_property
+ def indexing_jobs(self) -> IndexingJobsResource:
+ return IndexingJobsResource(self._client)
+
@cached_property
def with_raw_response(self) -> KnowledgeBasesResourceWithRawResponse:
"""
@@ -316,6 +328,10 @@ class AsyncKnowledgeBasesResource(AsyncAPIResource):
def data_sources(self) -> AsyncDataSourcesResource:
return AsyncDataSourcesResource(self._client)
+ @cached_property
+ def indexing_jobs(self) -> AsyncIndexingJobsResource:
+ return AsyncIndexingJobsResource(self._client)
+
@cached_property
def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse:
"""
@@ -611,6 +627,10 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None:
def data_sources(self) -> DataSourcesResourceWithRawResponse:
return DataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources)
+ @cached_property
+ def indexing_jobs(self) -> IndexingJobsResourceWithRawResponse:
+ return IndexingJobsResourceWithRawResponse(self._knowledge_bases.indexing_jobs)
+
class AsyncKnowledgeBasesResourceWithRawResponse:
def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None:
@@ -636,6 +656,10 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None:
def data_sources(self) -> AsyncDataSourcesResourceWithRawResponse:
return AsyncDataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources)
+ @cached_property
+ def indexing_jobs(self) -> AsyncIndexingJobsResourceWithRawResponse:
+ return AsyncIndexingJobsResourceWithRawResponse(self._knowledge_bases.indexing_jobs)
+
class KnowledgeBasesResourceWithStreamingResponse:
def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None:
@@ -661,6 +685,10 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None:
def data_sources(self) -> DataSourcesResourceWithStreamingResponse:
return DataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources)
+ @cached_property
+ def indexing_jobs(self) -> IndexingJobsResourceWithStreamingResponse:
+ return IndexingJobsResourceWithStreamingResponse(self._knowledge_bases.indexing_jobs)
+
class AsyncKnowledgeBasesResourceWithStreamingResponse:
def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None:
@@ -685,3 +713,7 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None:
@cached_property
def data_sources(self) -> AsyncDataSourcesResourceWithStreamingResponse:
return AsyncDataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources)
+
+ @cached_property
+ def indexing_jobs(self) -> AsyncIndexingJobsResourceWithStreamingResponse:
+ return AsyncIndexingJobsResourceWithStreamingResponse(self._knowledge_bases.indexing_jobs)
diff --git a/src/do_gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py
index e3c2ab9c..dde7f848 100644
--- a/src/do_gradientai/types/__init__.py
+++ b/src/do_gradientai/types/__init__.py
@@ -7,7 +7,6 @@
from .api_agreement import APIAgreement as APIAgreement
from .api_workspace import APIWorkspace as APIWorkspace
from .api_agent_model import APIAgentModel as APIAgentModel
-from .api_indexing_job import APIIndexingJob as APIIndexingJob
from .agent_list_params import AgentListParams as AgentListParams
from .api_model_version import APIModelVersion as APIModelVersion
from .model_list_params import ModelListParams as ModelListParams
@@ -25,25 +24,15 @@
from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo
from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse
from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo
-from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams
from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility
from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams
from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo
-from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams
-from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse
from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams
from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse
-from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse
from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams
from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse
from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams
-from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse
from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse
from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse
from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse
from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse
-from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams
-from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse
-from .indexing_job_retrieve_data_sources_response import (
- IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse,
-)
diff --git a/src/do_gradientai/types/api_knowledge_base.py b/src/do_gradientai/types/api_knowledge_base.py
index 5b4b6e2c..2b0676f0 100644
--- a/src/do_gradientai/types/api_knowledge_base.py
+++ b/src/do_gradientai/types/api_knowledge_base.py
@@ -4,7 +4,7 @@
from datetime import datetime
from .._models import BaseModel
-from .api_indexing_job import APIIndexingJob
+from .knowledge_bases.api_indexing_job import APIIndexingJob
__all__ = ["APIKnowledgeBase"]
diff --git a/src/do_gradientai/types/knowledge_bases/__init__.py b/src/do_gradientai/types/knowledge_bases/__init__.py
index 859c3618..9fc915e5 100644
--- a/src/do_gradientai/types/knowledge_bases/__init__.py
+++ b/src/do_gradientai/types/knowledge_bases/__init__.py
@@ -2,16 +2,27 @@
from __future__ import annotations
+from .api_indexing_job import APIIndexingJob as APIIndexingJob
from .aws_data_source_param import AwsDataSourceParam as AwsDataSourceParam
from .api_spaces_data_source import APISpacesDataSource as APISpacesDataSource
from .data_source_list_params import DataSourceListParams as DataSourceListParams
+from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams
from .data_source_create_params import DataSourceCreateParams as DataSourceCreateParams
from .data_source_list_response import DataSourceListResponse as DataSourceListResponse
+from .indexing_job_create_params import IndexingJobCreateParams as IndexingJobCreateParams
+from .indexing_job_list_response import IndexingJobListResponse as IndexingJobListResponse
from .api_file_upload_data_source import APIFileUploadDataSource as APIFileUploadDataSource
from .api_web_crawler_data_source import APIWebCrawlerDataSource as APIWebCrawlerDataSource
from .data_source_create_response import DataSourceCreateResponse as DataSourceCreateResponse
from .data_source_delete_response import DataSourceDeleteResponse as DataSourceDeleteResponse
from .api_spaces_data_source_param import APISpacesDataSourceParam as APISpacesDataSourceParam
+from .indexing_job_create_response import IndexingJobCreateResponse as IndexingJobCreateResponse
from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource as APIKnowledgeBaseDataSource
+from .indexing_job_retrieve_response import IndexingJobRetrieveResponse as IndexingJobRetrieveResponse
from .api_file_upload_data_source_param import APIFileUploadDataSourceParam as APIFileUploadDataSourceParam
from .api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam as APIWebCrawlerDataSourceParam
+from .indexing_job_update_cancel_params import IndexingJobUpdateCancelParams as IndexingJobUpdateCancelParams
+from .indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse as IndexingJobUpdateCancelResponse
+from .indexing_job_retrieve_data_sources_response import (
+ IndexingJobRetrieveDataSourcesResponse as IndexingJobRetrieveDataSourcesResponse,
+)
diff --git a/src/do_gradientai/types/api_indexing_job.py b/src/do_gradientai/types/knowledge_bases/api_indexing_job.py
similarity index 96%
rename from src/do_gradientai/types/api_indexing_job.py
rename to src/do_gradientai/types/knowledge_bases/api_indexing_job.py
index f24aac94..2809141c 100644
--- a/src/do_gradientai/types/api_indexing_job.py
+++ b/src/do_gradientai/types/knowledge_bases/api_indexing_job.py
@@ -4,7 +4,7 @@
from datetime import datetime
from typing_extensions import Literal
-from .._models import BaseModel
+from ..._models import BaseModel
__all__ = ["APIIndexingJob"]
diff --git a/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
index 57080aaa..ca24d6f0 100644
--- a/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
+++ b/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
@@ -4,7 +4,7 @@
from datetime import datetime
from ..._models import BaseModel
-from ..api_indexing_job import APIIndexingJob
+from .api_indexing_job import APIIndexingJob
from .api_spaces_data_source import APISpacesDataSource
from .api_file_upload_data_source import APIFileUploadDataSource
from .api_web_crawler_data_source import APIWebCrawlerDataSource
diff --git a/src/do_gradientai/types/indexing_job_create_params.py b/src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py
similarity index 100%
rename from src/do_gradientai/types/indexing_job_create_params.py
rename to src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py
diff --git a/src/do_gradientai/types/indexing_job_create_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py
similarity index 89%
rename from src/do_gradientai/types/indexing_job_create_response.py
rename to src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py
index 839bc83b..835ec60d 100644
--- a/src/do_gradientai/types/indexing_job_create_response.py
+++ b/src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py
@@ -2,7 +2,7 @@
from typing import Optional
-from .._models import BaseModel
+from ..._models import BaseModel
from .api_indexing_job import APIIndexingJob
__all__ = ["IndexingJobCreateResponse"]
diff --git a/src/do_gradientai/types/indexing_job_list_params.py b/src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py
similarity index 100%
rename from src/do_gradientai/types/indexing_job_list_params.py
rename to src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py
diff --git a/src/do_gradientai/types/indexing_job_list_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py
similarity index 77%
rename from src/do_gradientai/types/indexing_job_list_response.py
rename to src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py
index 1379cc55..4784c1a1 100644
--- a/src/do_gradientai/types/indexing_job_list_response.py
+++ b/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py
@@ -2,10 +2,10 @@
from typing import List, Optional
-from .._models import BaseModel
-from .agents.api_meta import APIMeta
-from .agents.api_links import APILinks
+from ..._models import BaseModel
+from ..agents.api_meta import APIMeta
from .api_indexing_job import APIIndexingJob
+from ..agents.api_links import APILinks
__all__ = ["IndexingJobListResponse"]
diff --git a/src/do_gradientai/types/indexing_job_retrieve_data_sources_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py
similarity index 97%
rename from src/do_gradientai/types/indexing_job_retrieve_data_sources_response.py
rename to src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py
index b178b984..a9d0c2c0 100644
--- a/src/do_gradientai/types/indexing_job_retrieve_data_sources_response.py
+++ b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py
@@ -4,7 +4,7 @@
from datetime import datetime
from typing_extensions import Literal
-from .._models import BaseModel
+from ..._models import BaseModel
__all__ = ["IndexingJobRetrieveDataSourcesResponse", "IndexedDataSource"]
diff --git a/src/do_gradientai/types/indexing_job_retrieve_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py
similarity index 89%
rename from src/do_gradientai/types/indexing_job_retrieve_response.py
rename to src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py
index 95f33d7a..6034bdf1 100644
--- a/src/do_gradientai/types/indexing_job_retrieve_response.py
+++ b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py
@@ -2,7 +2,7 @@
from typing import Optional
-from .._models import BaseModel
+from ..._models import BaseModel
from .api_indexing_job import APIIndexingJob
__all__ = ["IndexingJobRetrieveResponse"]
diff --git a/src/do_gradientai/types/indexing_job_update_cancel_params.py b/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py
similarity index 91%
rename from src/do_gradientai/types/indexing_job_update_cancel_params.py
rename to src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py
index 4c2848b0..9359a42a 100644
--- a/src/do_gradientai/types/indexing_job_update_cancel_params.py
+++ b/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py
@@ -4,7 +4,7 @@
from typing_extensions import Annotated, TypedDict
-from .._utils import PropertyInfo
+from ..._utils import PropertyInfo
__all__ = ["IndexingJobUpdateCancelParams"]
diff --git a/src/do_gradientai/types/indexing_job_update_cancel_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py
similarity index 90%
rename from src/do_gradientai/types/indexing_job_update_cancel_response.py
rename to src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py
index d50e1865..ae4b394f 100644
--- a/src/do_gradientai/types/indexing_job_update_cancel_response.py
+++ b/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py
@@ -2,7 +2,7 @@
from typing import Optional
-from .._models import BaseModel
+from ..._models import BaseModel
from .api_indexing_job import APIIndexingJob
__all__ = ["IndexingJobUpdateCancelResponse"]
diff --git a/tests/api_resources/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py
similarity index 80%
rename from tests/api_resources/test_indexing_jobs.py
rename to tests/api_resources/knowledge_bases/test_indexing_jobs.py
index 41ba0f8c..206339e0 100644
--- a/tests/api_resources/test_indexing_jobs.py
+++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py
@@ -9,7 +9,7 @@
from tests.utils import assert_matches_type
from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types import (
+from do_gradientai.types.knowledge_bases import (
IndexingJobListResponse,
IndexingJobCreateResponse,
IndexingJobRetrieveResponse,
@@ -26,13 +26,13 @@ class TestIndexingJobs:
@pytest.mark.skip()
@parametrize
def test_method_create(self, client: GradientAI) -> None:
- indexing_job = client.indexing_jobs.create()
+ indexing_job = client.knowledge_bases.indexing_jobs.create()
assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
@pytest.mark.skip()
@parametrize
def test_method_create_with_all_params(self, client: GradientAI) -> None:
- indexing_job = client.indexing_jobs.create(
+ indexing_job = client.knowledge_bases.indexing_jobs.create(
data_source_uuids=["string"],
knowledge_base_uuid="knowledge_base_uuid",
)
@@ -41,7 +41,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_create(self, client: GradientAI) -> None:
- response = client.indexing_jobs.with_raw_response.create()
+ response = client.knowledge_bases.indexing_jobs.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -51,7 +51,7 @@ def test_raw_response_create(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_create(self, client: GradientAI) -> None:
- with client.indexing_jobs.with_streaming_response.create() as response:
+ with client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -63,7 +63,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_retrieve(self, client: GradientAI) -> None:
- indexing_job = client.indexing_jobs.retrieve(
+ indexing_job = client.knowledge_bases.indexing_jobs.retrieve(
"uuid",
)
assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
@@ -71,7 +71,7 @@ def test_method_retrieve(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_retrieve(self, client: GradientAI) -> None:
- response = client.indexing_jobs.with_raw_response.retrieve(
+ response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve(
"uuid",
)
@@ -83,7 +83,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_retrieve(self, client: GradientAI) -> None:
- with client.indexing_jobs.with_streaming_response.retrieve(
+ with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve(
"uuid",
) as response:
assert not response.is_closed
@@ -98,20 +98,20 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
@parametrize
def test_path_params_retrieve(self, client: GradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
- client.indexing_jobs.with_raw_response.retrieve(
+ client.knowledge_bases.indexing_jobs.with_raw_response.retrieve(
"",
)
@pytest.mark.skip()
@parametrize
def test_method_list(self, client: GradientAI) -> None:
- indexing_job = client.indexing_jobs.list()
+ indexing_job = client.knowledge_bases.indexing_jobs.list()
assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
@pytest.mark.skip()
@parametrize
def test_method_list_with_all_params(self, client: GradientAI) -> None:
- indexing_job = client.indexing_jobs.list(
+ indexing_job = client.knowledge_bases.indexing_jobs.list(
page=0,
per_page=0,
)
@@ -120,7 +120,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_list(self, client: GradientAI) -> None:
- response = client.indexing_jobs.with_raw_response.list()
+ response = client.knowledge_bases.indexing_jobs.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -130,7 +130,7 @@ def test_raw_response_list(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_list(self, client: GradientAI) -> None:
- with client.indexing_jobs.with_streaming_response.list() as response:
+ with client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -142,7 +142,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_retrieve_data_sources(self, client: GradientAI) -> None:
- indexing_job = client.indexing_jobs.retrieve_data_sources(
+ indexing_job = client.knowledge_bases.indexing_jobs.retrieve_data_sources(
"indexing_job_uuid",
)
assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
@@ -150,7 +150,7 @@ def test_method_retrieve_data_sources(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_retrieve_data_sources(self, client: GradientAI) -> None:
- response = client.indexing_jobs.with_raw_response.retrieve_data_sources(
+ response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources(
"indexing_job_uuid",
)
@@ -162,7 +162,7 @@ def test_raw_response_retrieve_data_sources(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_retrieve_data_sources(self, client: GradientAI) -> None:
- with client.indexing_jobs.with_streaming_response.retrieve_data_sources(
+ with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources(
"indexing_job_uuid",
) as response:
assert not response.is_closed
@@ -177,14 +177,14 @@ def test_streaming_response_retrieve_data_sources(self, client: GradientAI) -> N
@parametrize
def test_path_params_retrieve_data_sources(self, client: GradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"):
- client.indexing_jobs.with_raw_response.retrieve_data_sources(
+ client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources(
"",
)
@pytest.mark.skip()
@parametrize
def test_method_update_cancel(self, client: GradientAI) -> None:
- indexing_job = client.indexing_jobs.update_cancel(
+ indexing_job = client.knowledge_bases.indexing_jobs.update_cancel(
path_uuid="uuid",
)
assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
@@ -192,7 +192,7 @@ def test_method_update_cancel(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None:
- indexing_job = client.indexing_jobs.update_cancel(
+ indexing_job = client.knowledge_bases.indexing_jobs.update_cancel(
path_uuid="uuid",
body_uuid="uuid",
)
@@ -201,7 +201,7 @@ def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_update_cancel(self, client: GradientAI) -> None:
- response = client.indexing_jobs.with_raw_response.update_cancel(
+ response = client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel(
path_uuid="uuid",
)
@@ -213,7 +213,7 @@ def test_raw_response_update_cancel(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_update_cancel(self, client: GradientAI) -> None:
- with client.indexing_jobs.with_streaming_response.update_cancel(
+ with client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel(
path_uuid="uuid",
) as response:
assert not response.is_closed
@@ -228,7 +228,7 @@ def test_streaming_response_update_cancel(self, client: GradientAI) -> None:
@parametrize
def test_path_params_update_cancel(self, client: GradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
- client.indexing_jobs.with_raw_response.update_cancel(
+ client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel(
path_uuid="",
)
@@ -241,13 +241,13 @@ class TestAsyncIndexingJobs:
@pytest.mark.skip()
@parametrize
async def test_method_create(self, async_client: AsyncGradientAI) -> None:
- indexing_job = await async_client.indexing_jobs.create()
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.create()
assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
- indexing_job = await async_client.indexing_jobs.create(
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.create(
data_source_uuids=["string"],
knowledge_base_uuid="knowledge_base_uuid",
)
@@ -256,7 +256,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI
@pytest.mark.skip()
@parametrize
async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.indexing_jobs.with_raw_response.create()
+ response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -266,7 +266,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
- async with async_client.indexing_jobs.with_streaming_response.create() as response:
+ async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -278,7 +278,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) ->
@pytest.mark.skip()
@parametrize
async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
- indexing_job = await async_client.indexing_jobs.retrieve(
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve(
"uuid",
)
assert_matches_type(IndexingJobRetrieveResponse, indexing_job, path=["response"])
@@ -286,7 +286,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.indexing_jobs.with_raw_response.retrieve(
+ response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve(
"uuid",
)
@@ -298,7 +298,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non
@pytest.mark.skip()
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- async with async_client.indexing_jobs.with_streaming_response.retrieve(
+ async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve(
"uuid",
) as response:
assert not response.is_closed
@@ -313,20 +313,20 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI)
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
- await async_client.indexing_jobs.with_raw_response.retrieve(
+ await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve(
"",
)
@pytest.mark.skip()
@parametrize
async def test_method_list(self, async_client: AsyncGradientAI) -> None:
- indexing_job = await async_client.indexing_jobs.list()
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.list()
assert_matches_type(IndexingJobListResponse, indexing_job, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
- indexing_job = await async_client.indexing_jobs.list(
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.list(
page=0,
per_page=0,
)
@@ -335,7 +335,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI)
@pytest.mark.skip()
@parametrize
async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.indexing_jobs.with_raw_response.list()
+ response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -345,7 +345,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
- async with async_client.indexing_jobs.with_streaming_response.list() as response:
+ async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -357,7 +357,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N
@pytest.mark.skip()
@parametrize
async def test_method_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None:
- indexing_job = await async_client.indexing_jobs.retrieve_data_sources(
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve_data_sources(
"indexing_job_uuid",
)
assert_matches_type(IndexingJobRetrieveDataSourcesResponse, indexing_job, path=["response"])
@@ -365,7 +365,7 @@ async def test_method_retrieve_data_sources(self, async_client: AsyncGradientAI)
@pytest.mark.skip()
@parametrize
async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.indexing_jobs.with_raw_response.retrieve_data_sources(
+ response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources(
"indexing_job_uuid",
)
@@ -377,7 +377,7 @@ async def test_raw_response_retrieve_data_sources(self, async_client: AsyncGradi
@pytest.mark.skip()
@parametrize
async def test_streaming_response_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None:
- async with async_client.indexing_jobs.with_streaming_response.retrieve_data_sources(
+ async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_data_sources(
"indexing_job_uuid",
) as response:
assert not response.is_closed
@@ -392,14 +392,14 @@ async def test_streaming_response_retrieve_data_sources(self, async_client: Asyn
@parametrize
async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"):
- await async_client.indexing_jobs.with_raw_response.retrieve_data_sources(
+ await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_data_sources(
"",
)
@pytest.mark.skip()
@parametrize
async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None:
- indexing_job = await async_client.indexing_jobs.update_cancel(
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel(
path_uuid="uuid",
)
assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"])
@@ -407,7 +407,7 @@ async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None
@pytest.mark.skip()
@parametrize
async def test_method_update_cancel_with_all_params(self, async_client: AsyncGradientAI) -> None:
- indexing_job = await async_client.indexing_jobs.update_cancel(
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel(
path_uuid="uuid",
body_uuid="uuid",
)
@@ -416,7 +416,7 @@ async def test_method_update_cancel_with_all_params(self, async_client: AsyncGra
@pytest.mark.skip()
@parametrize
async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.indexing_jobs.with_raw_response.update_cancel(
+ response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel(
path_uuid="uuid",
)
@@ -428,7 +428,7 @@ async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) -
@pytest.mark.skip()
@parametrize
async def test_streaming_response_update_cancel(self, async_client: AsyncGradientAI) -> None:
- async with async_client.indexing_jobs.with_streaming_response.update_cancel(
+ async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel(
path_uuid="uuid",
) as response:
assert not response.is_closed
@@ -443,6 +443,6 @@ async def test_streaming_response_update_cancel(self, async_client: AsyncGradien
@parametrize
async def test_path_params_update_cancel(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_uuid` but received ''"):
- await async_client.indexing_jobs.with_raw_response.update_cancel(
+ await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel(
path_uuid="",
)
From 9a45427678644c34afe9792a2561f394718e64ff Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 06:10:32 +0000
Subject: [PATCH 10/21] feat(api): update via SDK Studio
---
.stats.yml | 2 +-
api.md | 48 ++---
src/do_gradientai/_client.py | 40 ++--
src/do_gradientai/resources/__init__.py | 28 +--
.../resources/agents/__init__.py | 28 +--
src/do_gradientai/resources/agents/agents.py | 40 ++--
.../agents/{child_agents.py => routes.py} | 128 ++++++------
.../__init__.py | 26 +--
.../anthropic/__init__.py | 0
.../anthropic/anthropic.py | 0
.../anthropic/keys.py | 19 +-
.../model_providers.py} | 62 +++---
.../openai/__init__.py | 0
.../openai/keys.py | 19 +-
.../openai/openai.py | 0
src/do_gradientai/types/agents/__init__.py | 12 +-
...gent_add_params.py => route_add_params.py} | 4 +-
..._add_response.py => route_add_response.py} | 4 +-
...e_response.py => route_delete_response.py} | 4 +-
...pdate_params.py => route_update_params.py} | 4 +-
...e_response.py => route_update_response.py} | 4 +-
...iew_response.py => route_view_response.py} | 4 +-
.../__init__.py | 0
.../anthropic/__init__.py | 0
.../anthropic/key_create_params.py | 0
.../anthropic/key_create_response.py | 0
.../anthropic/key_delete_response.py | 0
.../anthropic/key_list_agents_params.py | 0
.../anthropic/key_list_agents_response.py | 0
.../anthropic/key_list_params.py | 0
.../anthropic/key_list_response.py | 0
.../anthropic/key_retrieve_response.py | 0
.../anthropic/key_update_params.py | 0
.../anthropic/key_update_response.py | 0
.../openai/__init__.py | 0
.../openai/key_create_params.py | 0
.../openai/key_create_response.py | 0
.../openai/key_delete_response.py | 0
.../openai/key_list_params.py | 0
.../openai/key_list_response.py | 0
.../openai/key_retrieve_agents_params.py | 0
.../openai/key_retrieve_agents_response.py | 0
.../openai/key_retrieve_response.py | 0
.../openai/key_update_params.py | 0
.../openai/key_update_response.py | 0
.../{test_child_agents.py => test_routes.py} | 184 +++++++++---------
.../__init__.py | 0
.../anthropic/__init__.py | 0
.../anthropic/test_keys.py | 106 +++++-----
.../openai/__init__.py | 0
.../openai/test_keys.py | 106 +++++-----
51 files changed, 441 insertions(+), 431 deletions(-)
rename src/do_gradientai/resources/agents/{child_agents.py => routes.py} (86%)
rename src/do_gradientai/resources/{providers => model_providers}/__init__.py (65%)
rename src/do_gradientai/resources/{providers => model_providers}/anthropic/__init__.py (100%)
rename src/do_gradientai/resources/{providers => model_providers}/anthropic/anthropic.py (100%)
rename src/do_gradientai/resources/{providers => model_providers}/anthropic/keys.py (97%)
rename src/do_gradientai/resources/{providers/providers.py => model_providers/model_providers.py} (61%)
rename src/do_gradientai/resources/{providers => model_providers}/openai/__init__.py (100%)
rename src/do_gradientai/resources/{providers => model_providers}/openai/keys.py (97%)
rename src/do_gradientai/resources/{providers => model_providers}/openai/openai.py (100%)
rename src/do_gradientai/types/agents/{child_agent_add_params.py => route_add_params.py} (87%)
rename src/do_gradientai/types/agents/{child_agent_add_response.py => route_add_response.py} (79%)
rename src/do_gradientai/types/agents/{child_agent_delete_response.py => route_delete_response.py} (74%)
rename src/do_gradientai/types/agents/{child_agent_update_params.py => route_update_params.py} (86%)
rename src/do_gradientai/types/agents/{child_agent_update_response.py => route_update_response.py} (81%)
rename src/do_gradientai/types/agents/{child_agent_view_response.py => route_view_response.py} (78%)
rename src/do_gradientai/types/{providers => model_providers}/__init__.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/anthropic/__init__.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_create_params.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_create_response.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_delete_response.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_list_agents_params.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_list_agents_response.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_list_params.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_list_response.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_retrieve_response.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_update_params.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/anthropic/key_update_response.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/openai/__init__.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/openai/key_create_params.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/openai/key_create_response.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/openai/key_delete_response.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/openai/key_list_params.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/openai/key_list_response.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/openai/key_retrieve_agents_params.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/openai/key_retrieve_agents_response.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/openai/key_retrieve_response.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/openai/key_update_params.py (100%)
rename src/do_gradientai/types/{providers => model_providers}/openai/key_update_response.py (100%)
rename tests/api_resources/agents/{test_child_agents.py => test_routes.py} (69%)
rename tests/api_resources/{providers => model_providers}/__init__.py (100%)
rename tests/api_resources/{providers => model_providers}/anthropic/__init__.py (100%)
rename tests/api_resources/{providers => model_providers}/anthropic/test_keys.py (80%)
rename tests/api_resources/{providers => model_providers}/openai/__init__.py (100%)
rename tests/api_resources/{providers => model_providers}/openai/test_keys.py (81%)
diff --git a/.stats.yml b/.stats.yml
index 3dd4e641..d58c3c34 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 67
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: 2529d2f80a3d70107331426b594b7f9b
+config_hash: a5bfbbd032355b26ddd41d659c93495b
diff --git a/api.md b/api.md
index b67c57a4..686761f9 100644
--- a/api.md
+++ b/api.md
@@ -171,27 +171,27 @@ Methods:
- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput
- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse
-## ChildAgents
+## Routes
Types:
```python
from do_gradientai.types.agents import (
- ChildAgentUpdateResponse,
- ChildAgentDeleteResponse,
- ChildAgentAddResponse,
- ChildAgentViewResponse,
+ RouteUpdateResponse,
+ RouteDeleteResponse,
+ RouteAddResponse,
+ RouteViewResponse,
)
```
Methods:
-- client.agents.child_agents.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentUpdateResponse
-- client.agents.child_agents.delete(child_agent_uuid, \*, parent_agent_uuid) -> ChildAgentDeleteResponse
-- client.agents.child_agents.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> ChildAgentAddResponse
-- client.agents.child_agents.view(uuid) -> ChildAgentViewResponse
+- client.agents.routes.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteUpdateResponse
+- client.agents.routes.delete(child_agent_uuid, \*, parent_agent_uuid) -> RouteDeleteResponse
+- client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse
+- client.agents.routes.view(uuid) -> RouteViewResponse
-# Providers
+# ModelProviders
## Anthropic
@@ -200,7 +200,7 @@ Methods:
Types:
```python
-from do_gradientai.types.providers.anthropic import (
+from do_gradientai.types.model_providers.anthropic import (
KeyCreateResponse,
KeyRetrieveResponse,
KeyUpdateResponse,
@@ -212,12 +212,12 @@ from do_gradientai.types.providers.anthropic import (
Methods:
-- client.providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse
-- client.providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
-- client.providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
-- client.providers.anthropic.keys.list(\*\*params) -> KeyListResponse
-- client.providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse
-- client.providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
+- client.model_providers.anthropic.keys.create(\*\*params) -> KeyCreateResponse
+- client.model_providers.anthropic.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
+- client.model_providers.anthropic.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
+- client.model_providers.anthropic.keys.list(\*\*params) -> KeyListResponse
+- client.model_providers.anthropic.keys.delete(api_key_uuid) -> KeyDeleteResponse
+- client.model_providers.anthropic.keys.list_agents(uuid, \*\*params) -> KeyListAgentsResponse
## OpenAI
@@ -226,7 +226,7 @@ Methods:
Types:
```python
-from do_gradientai.types.providers.openai import (
+from do_gradientai.types.model_providers.openai import (
KeyCreateResponse,
KeyRetrieveResponse,
KeyUpdateResponse,
@@ -238,12 +238,12 @@ from do_gradientai.types.providers.openai import (
Methods:
-- client.providers.openai.keys.create(\*\*params) -> KeyCreateResponse
-- client.providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
-- client.providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
-- client.providers.openai.keys.list(\*\*params) -> KeyListResponse
-- client.providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse
-- client.providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse
+- client.model_providers.openai.keys.create(\*\*params) -> KeyCreateResponse
+- client.model_providers.openai.keys.retrieve(api_key_uuid) -> KeyRetrieveResponse
+- client.model_providers.openai.keys.update(path_api_key_uuid, \*\*params) -> KeyUpdateResponse
+- client.model_providers.openai.keys.list(\*\*params) -> KeyListResponse
+- client.model_providers.openai.keys.delete(api_key_uuid) -> KeyDeleteResponse
+- client.model_providers.openai.keys.retrieve_agents(uuid, \*\*params) -> KeyRetrieveAgentsResponse
# Regions
diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py
index b8d55962..a57125ee 100644
--- a/src/do_gradientai/_client.py
+++ b/src/do_gradientai/_client.py
@@ -31,14 +31,14 @@
)
if TYPE_CHECKING:
- from .resources import chat, agents, models, regions, inference, providers, knowledge_bases
+ from .resources import chat, agents, models, regions, inference, knowledge_bases, model_providers
from .resources.models import ModelsResource, AsyncModelsResource
from .resources.regions import RegionsResource, AsyncRegionsResource
from .resources.chat.chat import ChatResource, AsyncChatResource
from .resources.agents.agents import AgentsResource, AsyncAgentsResource
from .resources.inference.inference import InferenceResource, AsyncInferenceResource
- from .resources.providers.providers import ProvidersResource, AsyncProvidersResource
from .resources.knowledge_bases.knowledge_bases import KnowledgeBasesResource, AsyncKnowledgeBasesResource
+ from .resources.model_providers.model_providers import ModelProvidersResource, AsyncModelProvidersResource
__all__ = [
"Timeout",
@@ -115,10 +115,10 @@ def agents(self) -> AgentsResource:
return AgentsResource(self)
@cached_property
- def providers(self) -> ProvidersResource:
- from .resources.providers import ProvidersResource
+ def model_providers(self) -> ModelProvidersResource:
+ from .resources.model_providers import ModelProvidersResource
- return ProvidersResource(self)
+ return ModelProvidersResource(self)
@cached_property
def regions(self) -> RegionsResource:
@@ -328,10 +328,10 @@ def agents(self) -> AsyncAgentsResource:
return AsyncAgentsResource(self)
@cached_property
- def providers(self) -> AsyncProvidersResource:
- from .resources.providers import AsyncProvidersResource
+ def model_providers(self) -> AsyncModelProvidersResource:
+ from .resources.model_providers import AsyncModelProvidersResource
- return AsyncProvidersResource(self)
+ return AsyncModelProvidersResource(self)
@cached_property
def regions(self) -> AsyncRegionsResource:
@@ -491,10 +491,10 @@ def agents(self) -> agents.AgentsResourceWithRawResponse:
return AgentsResourceWithRawResponse(self._client.agents)
@cached_property
- def providers(self) -> providers.ProvidersResourceWithRawResponse:
- from .resources.providers import ProvidersResourceWithRawResponse
+ def model_providers(self) -> model_providers.ModelProvidersResourceWithRawResponse:
+ from .resources.model_providers import ModelProvidersResourceWithRawResponse
- return ProvidersResourceWithRawResponse(self._client.providers)
+ return ModelProvidersResourceWithRawResponse(self._client.model_providers)
@cached_property
def regions(self) -> regions.RegionsResourceWithRawResponse:
@@ -540,10 +540,10 @@ def agents(self) -> agents.AsyncAgentsResourceWithRawResponse:
return AsyncAgentsResourceWithRawResponse(self._client.agents)
@cached_property
- def providers(self) -> providers.AsyncProvidersResourceWithRawResponse:
- from .resources.providers import AsyncProvidersResourceWithRawResponse
+ def model_providers(self) -> model_providers.AsyncModelProvidersResourceWithRawResponse:
+ from .resources.model_providers import AsyncModelProvidersResourceWithRawResponse
- return AsyncProvidersResourceWithRawResponse(self._client.providers)
+ return AsyncModelProvidersResourceWithRawResponse(self._client.model_providers)
@cached_property
def regions(self) -> regions.AsyncRegionsResourceWithRawResponse:
@@ -589,10 +589,10 @@ def agents(self) -> agents.AgentsResourceWithStreamingResponse:
return AgentsResourceWithStreamingResponse(self._client.agents)
@cached_property
- def providers(self) -> providers.ProvidersResourceWithStreamingResponse:
- from .resources.providers import ProvidersResourceWithStreamingResponse
+ def model_providers(self) -> model_providers.ModelProvidersResourceWithStreamingResponse:
+ from .resources.model_providers import ModelProvidersResourceWithStreamingResponse
- return ProvidersResourceWithStreamingResponse(self._client.providers)
+ return ModelProvidersResourceWithStreamingResponse(self._client.model_providers)
@cached_property
def regions(self) -> regions.RegionsResourceWithStreamingResponse:
@@ -638,10 +638,10 @@ def agents(self) -> agents.AsyncAgentsResourceWithStreamingResponse:
return AsyncAgentsResourceWithStreamingResponse(self._client.agents)
@cached_property
- def providers(self) -> providers.AsyncProvidersResourceWithStreamingResponse:
- from .resources.providers import AsyncProvidersResourceWithStreamingResponse
+ def model_providers(self) -> model_providers.AsyncModelProvidersResourceWithStreamingResponse:
+ from .resources.model_providers import AsyncModelProvidersResourceWithStreamingResponse
- return AsyncProvidersResourceWithStreamingResponse(self._client.providers)
+ return AsyncModelProvidersResourceWithStreamingResponse(self._client.model_providers)
@cached_property
def regions(self) -> regions.AsyncRegionsResourceWithStreamingResponse:
diff --git a/src/do_gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py
index 6ad0aa32..785bf1ac 100644
--- a/src/do_gradientai/resources/__init__.py
+++ b/src/do_gradientai/resources/__init__.py
@@ -40,14 +40,6 @@
InferenceResourceWithStreamingResponse,
AsyncInferenceResourceWithStreamingResponse,
)
-from .providers import (
- ProvidersResource,
- AsyncProvidersResource,
- ProvidersResourceWithRawResponse,
- AsyncProvidersResourceWithRawResponse,
- ProvidersResourceWithStreamingResponse,
- AsyncProvidersResourceWithStreamingResponse,
-)
from .knowledge_bases import (
KnowledgeBasesResource,
AsyncKnowledgeBasesResource,
@@ -56,6 +48,14 @@
KnowledgeBasesResourceWithStreamingResponse,
AsyncKnowledgeBasesResourceWithStreamingResponse,
)
+from .model_providers import (
+ ModelProvidersResource,
+ AsyncModelProvidersResource,
+ ModelProvidersResourceWithRawResponse,
+ AsyncModelProvidersResourceWithRawResponse,
+ ModelProvidersResourceWithStreamingResponse,
+ AsyncModelProvidersResourceWithStreamingResponse,
+)
__all__ = [
"AgentsResource",
@@ -64,12 +64,12 @@
"AsyncAgentsResourceWithRawResponse",
"AgentsResourceWithStreamingResponse",
"AsyncAgentsResourceWithStreamingResponse",
- "ProvidersResource",
- "AsyncProvidersResource",
- "ProvidersResourceWithRawResponse",
- "AsyncProvidersResourceWithRawResponse",
- "ProvidersResourceWithStreamingResponse",
- "AsyncProvidersResourceWithStreamingResponse",
+ "ModelProvidersResource",
+ "AsyncModelProvidersResource",
+ "ModelProvidersResourceWithRawResponse",
+ "AsyncModelProvidersResourceWithRawResponse",
+ "ModelProvidersResourceWithStreamingResponse",
+ "AsyncModelProvidersResourceWithStreamingResponse",
"RegionsResource",
"AsyncRegionsResource",
"RegionsResourceWithRawResponse",
diff --git a/src/do_gradientai/resources/agents/__init__.py b/src/do_gradientai/resources/agents/__init__.py
index 3eb9cde8..f5423f00 100644
--- a/src/do_gradientai/resources/agents/__init__.py
+++ b/src/do_gradientai/resources/agents/__init__.py
@@ -8,6 +8,14 @@
AgentsResourceWithStreamingResponse,
AsyncAgentsResourceWithStreamingResponse,
)
+from .routes import (
+ RoutesResource,
+ AsyncRoutesResource,
+ RoutesResourceWithRawResponse,
+ AsyncRoutesResourceWithRawResponse,
+ RoutesResourceWithStreamingResponse,
+ AsyncRoutesResourceWithStreamingResponse,
+)
from .api_keys import (
APIKeysResource,
AsyncAPIKeysResource,
@@ -32,14 +40,6 @@
FunctionsResourceWithStreamingResponse,
AsyncFunctionsResourceWithStreamingResponse,
)
-from .child_agents import (
- ChildAgentsResource,
- AsyncChildAgentsResource,
- ChildAgentsResourceWithRawResponse,
- AsyncChildAgentsResourceWithRawResponse,
- ChildAgentsResourceWithStreamingResponse,
- AsyncChildAgentsResourceWithStreamingResponse,
-)
from .evaluation_runs import (
EvaluationRunsResource,
AsyncEvaluationRunsResource,
@@ -130,12 +130,12 @@
"AsyncKnowledgeBasesResourceWithRawResponse",
"KnowledgeBasesResourceWithStreamingResponse",
"AsyncKnowledgeBasesResourceWithStreamingResponse",
- "ChildAgentsResource",
- "AsyncChildAgentsResource",
- "ChildAgentsResourceWithRawResponse",
- "AsyncChildAgentsResourceWithRawResponse",
- "ChildAgentsResourceWithStreamingResponse",
- "AsyncChildAgentsResourceWithStreamingResponse",
+ "RoutesResource",
+ "AsyncRoutesResource",
+ "RoutesResourceWithRawResponse",
+ "AsyncRoutesResourceWithRawResponse",
+ "RoutesResourceWithStreamingResponse",
+ "AsyncRoutesResourceWithStreamingResponse",
"AgentsResource",
"AsyncAgentsResource",
"AgentsResourceWithRawResponse",
diff --git a/src/do_gradientai/resources/agents/agents.py b/src/do_gradientai/resources/agents/agents.py
index 6bb39894..5762139d 100644
--- a/src/do_gradientai/resources/agents/agents.py
+++ b/src/do_gradientai/resources/agents/agents.py
@@ -6,6 +6,14 @@
import httpx
+from .routes import (
+ RoutesResource,
+ AsyncRoutesResource,
+ RoutesResourceWithRawResponse,
+ AsyncRoutesResourceWithRawResponse,
+ RoutesResourceWithStreamingResponse,
+ AsyncRoutesResourceWithStreamingResponse,
+)
from ...types import (
APIRetrievalMethod,
APIDeploymentVisibility,
@@ -48,14 +56,6 @@
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
-from .child_agents import (
- ChildAgentsResource,
- AsyncChildAgentsResource,
- ChildAgentsResourceWithRawResponse,
- AsyncChildAgentsResourceWithRawResponse,
- ChildAgentsResourceWithStreamingResponse,
- AsyncChildAgentsResourceWithStreamingResponse,
-)
from ..._base_client import make_request_options
from .evaluation_runs import (
EvaluationRunsResource,
@@ -143,8 +143,8 @@ def knowledge_bases(self) -> KnowledgeBasesResource:
return KnowledgeBasesResource(self._client)
@cached_property
- def child_agents(self) -> ChildAgentsResource:
- return ChildAgentsResource(self._client)
+ def routes(self) -> RoutesResource:
+ return RoutesResource(self._client)
@cached_property
def with_raw_response(self) -> AgentsResourceWithRawResponse:
@@ -527,8 +527,8 @@ def knowledge_bases(self) -> AsyncKnowledgeBasesResource:
return AsyncKnowledgeBasesResource(self._client)
@cached_property
- def child_agents(self) -> AsyncChildAgentsResource:
- return AsyncChildAgentsResource(self._client)
+ def routes(self) -> AsyncRoutesResource:
+ return AsyncRoutesResource(self._client)
@cached_property
def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse:
@@ -933,8 +933,8 @@ def knowledge_bases(self) -> KnowledgeBasesResourceWithRawResponse:
return KnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases)
@cached_property
- def child_agents(self) -> ChildAgentsResourceWithRawResponse:
- return ChildAgentsResourceWithRawResponse(self._agents.child_agents)
+ def routes(self) -> RoutesResourceWithRawResponse:
+ return RoutesResourceWithRawResponse(self._agents.routes)
class AsyncAgentsResourceWithRawResponse:
@@ -993,8 +993,8 @@ def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithRawResponse:
return AsyncKnowledgeBasesResourceWithRawResponse(self._agents.knowledge_bases)
@cached_property
- def child_agents(self) -> AsyncChildAgentsResourceWithRawResponse:
- return AsyncChildAgentsResourceWithRawResponse(self._agents.child_agents)
+ def routes(self) -> AsyncRoutesResourceWithRawResponse:
+ return AsyncRoutesResourceWithRawResponse(self._agents.routes)
class AgentsResourceWithStreamingResponse:
@@ -1053,8 +1053,8 @@ def knowledge_bases(self) -> KnowledgeBasesResourceWithStreamingResponse:
return KnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases)
@cached_property
- def child_agents(self) -> ChildAgentsResourceWithStreamingResponse:
- return ChildAgentsResourceWithStreamingResponse(self._agents.child_agents)
+ def routes(self) -> RoutesResourceWithStreamingResponse:
+ return RoutesResourceWithStreamingResponse(self._agents.routes)
class AsyncAgentsResourceWithStreamingResponse:
@@ -1113,5 +1113,5 @@ def knowledge_bases(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse:
return AsyncKnowledgeBasesResourceWithStreamingResponse(self._agents.knowledge_bases)
@cached_property
- def child_agents(self) -> AsyncChildAgentsResourceWithStreamingResponse:
- return AsyncChildAgentsResourceWithStreamingResponse(self._agents.child_agents)
+ def routes(self) -> AsyncRoutesResourceWithStreamingResponse:
+ return AsyncRoutesResourceWithStreamingResponse(self._agents.routes)
diff --git a/src/do_gradientai/resources/agents/child_agents.py b/src/do_gradientai/resources/agents/routes.py
similarity index 86%
rename from src/do_gradientai/resources/agents/child_agents.py
rename to src/do_gradientai/resources/agents/routes.py
index ad30f106..ed25d795 100644
--- a/src/do_gradientai/resources/agents/child_agents.py
+++ b/src/do_gradientai/resources/agents/routes.py
@@ -15,34 +15,34 @@
async_to_streamed_response_wrapper,
)
from ..._base_client import make_request_options
-from ...types.agents import child_agent_add_params, child_agent_update_params
-from ...types.agents.child_agent_add_response import ChildAgentAddResponse
-from ...types.agents.child_agent_view_response import ChildAgentViewResponse
-from ...types.agents.child_agent_delete_response import ChildAgentDeleteResponse
-from ...types.agents.child_agent_update_response import ChildAgentUpdateResponse
+from ...types.agents import route_add_params, route_update_params
+from ...types.agents.route_add_response import RouteAddResponse
+from ...types.agents.route_view_response import RouteViewResponse
+from ...types.agents.route_delete_response import RouteDeleteResponse
+from ...types.agents.route_update_response import RouteUpdateResponse
-__all__ = ["ChildAgentsResource", "AsyncChildAgentsResource"]
+__all__ = ["RoutesResource", "AsyncRoutesResource"]
-class ChildAgentsResource(SyncAPIResource):
+class RoutesResource(SyncAPIResource):
@cached_property
- def with_raw_response(self) -> ChildAgentsResourceWithRawResponse:
+ def with_raw_response(self) -> RoutesResourceWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
"""
- return ChildAgentsResourceWithRawResponse(self)
+ return RoutesResourceWithRawResponse(self)
@cached_property
- def with_streaming_response(self) -> ChildAgentsResourceWithStreamingResponse:
+ def with_streaming_response(self) -> RoutesResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
"""
- return ChildAgentsResourceWithStreamingResponse(self)
+ return RoutesResourceWithStreamingResponse(self)
def update(
self,
@@ -60,7 +60,7 @@ def update(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ChildAgentUpdateResponse:
+ ) -> RouteUpdateResponse:
"""
To update an agent route for an agent, send a PUT request to
`/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`.
@@ -96,12 +96,12 @@ def update(
"route_name": route_name,
"uuid": uuid,
},
- child_agent_update_params.ChildAgentUpdateParams,
+ route_update_params.RouteUpdateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=ChildAgentUpdateResponse,
+ cast_to=RouteUpdateResponse,
)
def delete(
@@ -115,7 +115,7 @@ def delete(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ChildAgentDeleteResponse:
+ ) -> RouteDeleteResponse:
"""
To delete an agent route from a parent agent, send a DELETE request to
`/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`.
@@ -140,7 +140,7 @@ def delete(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=ChildAgentDeleteResponse,
+ cast_to=RouteDeleteResponse,
)
def add(
@@ -158,7 +158,7 @@ def add(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ChildAgentAddResponse:
+ ) -> RouteAddResponse:
"""
To add an agent route to an agent, send a POST request to
`/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`.
@@ -193,12 +193,12 @@ def add(
"body_parent_agent_uuid": body_parent_agent_uuid,
"route_name": route_name,
},
- child_agent_add_params.ChildAgentAddParams,
+ route_add_params.RouteAddParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=ChildAgentAddResponse,
+ cast_to=RouteAddResponse,
)
def view(
@@ -211,7 +211,7 @@ def view(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ChildAgentViewResponse:
+ ) -> RouteViewResponse:
"""
To view agent routes for an agent, send a GET requtest to
`/v2/gen-ai/agents/{uuid}/child_agents`.
@@ -234,29 +234,29 @@ def view(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=ChildAgentViewResponse,
+ cast_to=RouteViewResponse,
)
-class AsyncChildAgentsResource(AsyncAPIResource):
+class AsyncRoutesResource(AsyncAPIResource):
@cached_property
- def with_raw_response(self) -> AsyncChildAgentsResourceWithRawResponse:
+ def with_raw_response(self) -> AsyncRoutesResourceWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
"""
- return AsyncChildAgentsResourceWithRawResponse(self)
+ return AsyncRoutesResourceWithRawResponse(self)
@cached_property
- def with_streaming_response(self) -> AsyncChildAgentsResourceWithStreamingResponse:
+ def with_streaming_response(self) -> AsyncRoutesResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
"""
- return AsyncChildAgentsResourceWithStreamingResponse(self)
+ return AsyncRoutesResourceWithStreamingResponse(self)
async def update(
self,
@@ -274,7 +274,7 @@ async def update(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ChildAgentUpdateResponse:
+ ) -> RouteUpdateResponse:
"""
To update an agent route for an agent, send a PUT request to
`/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`.
@@ -310,12 +310,12 @@ async def update(
"route_name": route_name,
"uuid": uuid,
},
- child_agent_update_params.ChildAgentUpdateParams,
+ route_update_params.RouteUpdateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=ChildAgentUpdateResponse,
+ cast_to=RouteUpdateResponse,
)
async def delete(
@@ -329,7 +329,7 @@ async def delete(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ChildAgentDeleteResponse:
+ ) -> RouteDeleteResponse:
"""
To delete an agent route from a parent agent, send a DELETE request to
`/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`.
@@ -354,7 +354,7 @@ async def delete(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=ChildAgentDeleteResponse,
+ cast_to=RouteDeleteResponse,
)
async def add(
@@ -372,7 +372,7 @@ async def add(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ChildAgentAddResponse:
+ ) -> RouteAddResponse:
"""
To add an agent route to an agent, send a POST request to
`/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`.
@@ -407,12 +407,12 @@ async def add(
"body_parent_agent_uuid": body_parent_agent_uuid,
"route_name": route_name,
},
- child_agent_add_params.ChildAgentAddParams,
+ route_add_params.RouteAddParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=ChildAgentAddResponse,
+ cast_to=RouteAddResponse,
)
async def view(
@@ -425,7 +425,7 @@ async def view(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ChildAgentViewResponse:
+ ) -> RouteViewResponse:
"""
To view agent routes for an agent, send a GET requtest to
`/v2/gen-ai/agents/{uuid}/child_agents`.
@@ -448,77 +448,77 @@ async def view(
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=ChildAgentViewResponse,
+ cast_to=RouteViewResponse,
)
-class ChildAgentsResourceWithRawResponse:
- def __init__(self, child_agents: ChildAgentsResource) -> None:
- self._child_agents = child_agents
+class RoutesResourceWithRawResponse:
+ def __init__(self, routes: RoutesResource) -> None:
+ self._routes = routes
self.update = to_raw_response_wrapper(
- child_agents.update,
+ routes.update,
)
self.delete = to_raw_response_wrapper(
- child_agents.delete,
+ routes.delete,
)
self.add = to_raw_response_wrapper(
- child_agents.add,
+ routes.add,
)
self.view = to_raw_response_wrapper(
- child_agents.view,
+ routes.view,
)
-class AsyncChildAgentsResourceWithRawResponse:
- def __init__(self, child_agents: AsyncChildAgentsResource) -> None:
- self._child_agents = child_agents
+class AsyncRoutesResourceWithRawResponse:
+ def __init__(self, routes: AsyncRoutesResource) -> None:
+ self._routes = routes
self.update = async_to_raw_response_wrapper(
- child_agents.update,
+ routes.update,
)
self.delete = async_to_raw_response_wrapper(
- child_agents.delete,
+ routes.delete,
)
self.add = async_to_raw_response_wrapper(
- child_agents.add,
+ routes.add,
)
self.view = async_to_raw_response_wrapper(
- child_agents.view,
+ routes.view,
)
-class ChildAgentsResourceWithStreamingResponse:
- def __init__(self, child_agents: ChildAgentsResource) -> None:
- self._child_agents = child_agents
+class RoutesResourceWithStreamingResponse:
+ def __init__(self, routes: RoutesResource) -> None:
+ self._routes = routes
self.update = to_streamed_response_wrapper(
- child_agents.update,
+ routes.update,
)
self.delete = to_streamed_response_wrapper(
- child_agents.delete,
+ routes.delete,
)
self.add = to_streamed_response_wrapper(
- child_agents.add,
+ routes.add,
)
self.view = to_streamed_response_wrapper(
- child_agents.view,
+ routes.view,
)
-class AsyncChildAgentsResourceWithStreamingResponse:
- def __init__(self, child_agents: AsyncChildAgentsResource) -> None:
- self._child_agents = child_agents
+class AsyncRoutesResourceWithStreamingResponse:
+ def __init__(self, routes: AsyncRoutesResource) -> None:
+ self._routes = routes
self.update = async_to_streamed_response_wrapper(
- child_agents.update,
+ routes.update,
)
self.delete = async_to_streamed_response_wrapper(
- child_agents.delete,
+ routes.delete,
)
self.add = async_to_streamed_response_wrapper(
- child_agents.add,
+ routes.add,
)
self.view = async_to_streamed_response_wrapper(
- child_agents.view,
+ routes.view,
)
diff --git a/src/do_gradientai/resources/providers/__init__.py b/src/do_gradientai/resources/model_providers/__init__.py
similarity index 65%
rename from src/do_gradientai/resources/providers/__init__.py
rename to src/do_gradientai/resources/model_providers/__init__.py
index 1731e057..3d91a86c 100644
--- a/src/do_gradientai/resources/providers/__init__.py
+++ b/src/do_gradientai/resources/model_providers/__init__.py
@@ -16,13 +16,13 @@
AnthropicResourceWithStreamingResponse,
AsyncAnthropicResourceWithStreamingResponse,
)
-from .providers import (
- ProvidersResource,
- AsyncProvidersResource,
- ProvidersResourceWithRawResponse,
- AsyncProvidersResourceWithRawResponse,
- ProvidersResourceWithStreamingResponse,
- AsyncProvidersResourceWithStreamingResponse,
+from .model_providers import (
+ ModelProvidersResource,
+ AsyncModelProvidersResource,
+ ModelProvidersResourceWithRawResponse,
+ AsyncModelProvidersResourceWithRawResponse,
+ ModelProvidersResourceWithStreamingResponse,
+ AsyncModelProvidersResourceWithStreamingResponse,
)
__all__ = [
@@ -38,10 +38,10 @@
"AsyncOpenAIResourceWithRawResponse",
"OpenAIResourceWithStreamingResponse",
"AsyncOpenAIResourceWithStreamingResponse",
- "ProvidersResource",
- "AsyncProvidersResource",
- "ProvidersResourceWithRawResponse",
- "AsyncProvidersResourceWithRawResponse",
- "ProvidersResourceWithStreamingResponse",
- "AsyncProvidersResourceWithStreamingResponse",
+ "ModelProvidersResource",
+ "AsyncModelProvidersResource",
+ "ModelProvidersResourceWithRawResponse",
+ "AsyncModelProvidersResourceWithRawResponse",
+ "ModelProvidersResourceWithStreamingResponse",
+ "AsyncModelProvidersResourceWithStreamingResponse",
]
diff --git a/src/do_gradientai/resources/providers/anthropic/__init__.py b/src/do_gradientai/resources/model_providers/anthropic/__init__.py
similarity index 100%
rename from src/do_gradientai/resources/providers/anthropic/__init__.py
rename to src/do_gradientai/resources/model_providers/anthropic/__init__.py
diff --git a/src/do_gradientai/resources/providers/anthropic/anthropic.py b/src/do_gradientai/resources/model_providers/anthropic/anthropic.py
similarity index 100%
rename from src/do_gradientai/resources/providers/anthropic/anthropic.py
rename to src/do_gradientai/resources/model_providers/anthropic/anthropic.py
diff --git a/src/do_gradientai/resources/providers/anthropic/keys.py b/src/do_gradientai/resources/model_providers/anthropic/keys.py
similarity index 97%
rename from src/do_gradientai/resources/providers/anthropic/keys.py
rename to src/do_gradientai/resources/model_providers/anthropic/keys.py
index d1a33290..4d884655 100644
--- a/src/do_gradientai/resources/providers/anthropic/keys.py
+++ b/src/do_gradientai/resources/model_providers/anthropic/keys.py
@@ -15,13 +15,18 @@
async_to_streamed_response_wrapper,
)
from ...._base_client import make_request_options
-from ....types.providers.anthropic import key_list_params, key_create_params, key_update_params, key_list_agents_params
-from ....types.providers.anthropic.key_list_response import KeyListResponse
-from ....types.providers.anthropic.key_create_response import KeyCreateResponse
-from ....types.providers.anthropic.key_delete_response import KeyDeleteResponse
-from ....types.providers.anthropic.key_update_response import KeyUpdateResponse
-from ....types.providers.anthropic.key_retrieve_response import KeyRetrieveResponse
-from ....types.providers.anthropic.key_list_agents_response import KeyListAgentsResponse
+from ....types.model_providers.anthropic import (
+ key_list_params,
+ key_create_params,
+ key_update_params,
+ key_list_agents_params,
+)
+from ....types.model_providers.anthropic.key_list_response import KeyListResponse
+from ....types.model_providers.anthropic.key_create_response import KeyCreateResponse
+from ....types.model_providers.anthropic.key_delete_response import KeyDeleteResponse
+from ....types.model_providers.anthropic.key_update_response import KeyUpdateResponse
+from ....types.model_providers.anthropic.key_retrieve_response import KeyRetrieveResponse
+from ....types.model_providers.anthropic.key_list_agents_response import KeyListAgentsResponse
__all__ = ["KeysResource", "AsyncKeysResource"]
diff --git a/src/do_gradientai/resources/providers/providers.py b/src/do_gradientai/resources/model_providers/model_providers.py
similarity index 61%
rename from src/do_gradientai/resources/providers/providers.py
rename to src/do_gradientai/resources/model_providers/model_providers.py
index ef942f73..cf710ecf 100644
--- a/src/do_gradientai/resources/providers/providers.py
+++ b/src/do_gradientai/resources/model_providers/model_providers.py
@@ -21,10 +21,10 @@
AsyncAnthropicResourceWithStreamingResponse,
)
-__all__ = ["ProvidersResource", "AsyncProvidersResource"]
+__all__ = ["ModelProvidersResource", "AsyncModelProvidersResource"]
-class ProvidersResource(SyncAPIResource):
+class ModelProvidersResource(SyncAPIResource):
@cached_property
def anthropic(self) -> AnthropicResource:
return AnthropicResource(self._client)
@@ -34,26 +34,26 @@ def openai(self) -> OpenAIResource:
return OpenAIResource(self._client)
@cached_property
- def with_raw_response(self) -> ProvidersResourceWithRawResponse:
+ def with_raw_response(self) -> ModelProvidersResourceWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
"""
- return ProvidersResourceWithRawResponse(self)
+ return ModelProvidersResourceWithRawResponse(self)
@cached_property
- def with_streaming_response(self) -> ProvidersResourceWithStreamingResponse:
+ def with_streaming_response(self) -> ModelProvidersResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
"""
- return ProvidersResourceWithStreamingResponse(self)
+ return ModelProvidersResourceWithStreamingResponse(self)
-class AsyncProvidersResource(AsyncAPIResource):
+class AsyncModelProvidersResource(AsyncAPIResource):
@cached_property
def anthropic(self) -> AsyncAnthropicResource:
return AsyncAnthropicResource(self._client)
@@ -63,72 +63,72 @@ def openai(self) -> AsyncOpenAIResource:
return AsyncOpenAIResource(self._client)
@cached_property
- def with_raw_response(self) -> AsyncProvidersResourceWithRawResponse:
+ def with_raw_response(self) -> AsyncModelProvidersResourceWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
"""
- return AsyncProvidersResourceWithRawResponse(self)
+ return AsyncModelProvidersResourceWithRawResponse(self)
@cached_property
- def with_streaming_response(self) -> AsyncProvidersResourceWithStreamingResponse:
+ def with_streaming_response(self) -> AsyncModelProvidersResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
"""
- return AsyncProvidersResourceWithStreamingResponse(self)
+ return AsyncModelProvidersResourceWithStreamingResponse(self)
-class ProvidersResourceWithRawResponse:
- def __init__(self, providers: ProvidersResource) -> None:
- self._providers = providers
+class ModelProvidersResourceWithRawResponse:
+ def __init__(self, model_providers: ModelProvidersResource) -> None:
+ self._model_providers = model_providers
@cached_property
def anthropic(self) -> AnthropicResourceWithRawResponse:
- return AnthropicResourceWithRawResponse(self._providers.anthropic)
+ return AnthropicResourceWithRawResponse(self._model_providers.anthropic)
@cached_property
def openai(self) -> OpenAIResourceWithRawResponse:
- return OpenAIResourceWithRawResponse(self._providers.openai)
+ return OpenAIResourceWithRawResponse(self._model_providers.openai)
-class AsyncProvidersResourceWithRawResponse:
- def __init__(self, providers: AsyncProvidersResource) -> None:
- self._providers = providers
+class AsyncModelProvidersResourceWithRawResponse:
+ def __init__(self, model_providers: AsyncModelProvidersResource) -> None:
+ self._model_providers = model_providers
@cached_property
def anthropic(self) -> AsyncAnthropicResourceWithRawResponse:
- return AsyncAnthropicResourceWithRawResponse(self._providers.anthropic)
+ return AsyncAnthropicResourceWithRawResponse(self._model_providers.anthropic)
@cached_property
def openai(self) -> AsyncOpenAIResourceWithRawResponse:
- return AsyncOpenAIResourceWithRawResponse(self._providers.openai)
+ return AsyncOpenAIResourceWithRawResponse(self._model_providers.openai)
-class ProvidersResourceWithStreamingResponse:
- def __init__(self, providers: ProvidersResource) -> None:
- self._providers = providers
+class ModelProvidersResourceWithStreamingResponse:
+ def __init__(self, model_providers: ModelProvidersResource) -> None:
+ self._model_providers = model_providers
@cached_property
def anthropic(self) -> AnthropicResourceWithStreamingResponse:
- return AnthropicResourceWithStreamingResponse(self._providers.anthropic)
+ return AnthropicResourceWithStreamingResponse(self._model_providers.anthropic)
@cached_property
def openai(self) -> OpenAIResourceWithStreamingResponse:
- return OpenAIResourceWithStreamingResponse(self._providers.openai)
+ return OpenAIResourceWithStreamingResponse(self._model_providers.openai)
-class AsyncProvidersResourceWithStreamingResponse:
- def __init__(self, providers: AsyncProvidersResource) -> None:
- self._providers = providers
+class AsyncModelProvidersResourceWithStreamingResponse:
+ def __init__(self, model_providers: AsyncModelProvidersResource) -> None:
+ self._model_providers = model_providers
@cached_property
def anthropic(self) -> AsyncAnthropicResourceWithStreamingResponse:
- return AsyncAnthropicResourceWithStreamingResponse(self._providers.anthropic)
+ return AsyncAnthropicResourceWithStreamingResponse(self._model_providers.anthropic)
@cached_property
def openai(self) -> AsyncOpenAIResourceWithStreamingResponse:
- return AsyncOpenAIResourceWithStreamingResponse(self._providers.openai)
+ return AsyncOpenAIResourceWithStreamingResponse(self._model_providers.openai)
diff --git a/src/do_gradientai/resources/providers/openai/__init__.py b/src/do_gradientai/resources/model_providers/openai/__init__.py
similarity index 100%
rename from src/do_gradientai/resources/providers/openai/__init__.py
rename to src/do_gradientai/resources/model_providers/openai/__init__.py
diff --git a/src/do_gradientai/resources/providers/openai/keys.py b/src/do_gradientai/resources/model_providers/openai/keys.py
similarity index 97%
rename from src/do_gradientai/resources/providers/openai/keys.py
rename to src/do_gradientai/resources/model_providers/openai/keys.py
index 01cfee75..fb974808 100644
--- a/src/do_gradientai/resources/providers/openai/keys.py
+++ b/src/do_gradientai/resources/model_providers/openai/keys.py
@@ -15,13 +15,18 @@
async_to_streamed_response_wrapper,
)
from ...._base_client import make_request_options
-from ....types.providers.openai import key_list_params, key_create_params, key_update_params, key_retrieve_agents_params
-from ....types.providers.openai.key_list_response import KeyListResponse
-from ....types.providers.openai.key_create_response import KeyCreateResponse
-from ....types.providers.openai.key_delete_response import KeyDeleteResponse
-from ....types.providers.openai.key_update_response import KeyUpdateResponse
-from ....types.providers.openai.key_retrieve_response import KeyRetrieveResponse
-from ....types.providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse
+from ....types.model_providers.openai import (
+ key_list_params,
+ key_create_params,
+ key_update_params,
+ key_retrieve_agents_params,
+)
+from ....types.model_providers.openai.key_list_response import KeyListResponse
+from ....types.model_providers.openai.key_create_response import KeyCreateResponse
+from ....types.model_providers.openai.key_delete_response import KeyDeleteResponse
+from ....types.model_providers.openai.key_update_response import KeyUpdateResponse
+from ....types.model_providers.openai.key_retrieve_response import KeyRetrieveResponse
+from ....types.model_providers.openai.key_retrieve_agents_response import KeyRetrieveAgentsResponse
__all__ = ["KeysResource", "AsyncKeysResource"]
diff --git a/src/do_gradientai/resources/providers/openai/openai.py b/src/do_gradientai/resources/model_providers/openai/openai.py
similarity index 100%
rename from src/do_gradientai/resources/providers/openai/openai.py
rename to src/do_gradientai/resources/model_providers/openai/openai.py
diff --git a/src/do_gradientai/types/agents/__init__.py b/src/do_gradientai/types/agents/__init__.py
index 7e100741..1dd18511 100644
--- a/src/do_gradientai/types/agents/__init__.py
+++ b/src/do_gradientai/types/agents/__init__.py
@@ -5,8 +5,12 @@
from .api_meta import APIMeta as APIMeta
from .api_links import APILinks as APILinks
from .api_star_metric import APIStarMetric as APIStarMetric
+from .route_add_params import RouteAddParams as RouteAddParams
from .api_evaluation_run import APIEvaluationRun as APIEvaluationRun
+from .route_add_response import RouteAddResponse as RouteAddResponse
from .api_key_list_params import APIKeyListParams as APIKeyListParams
+from .route_update_params import RouteUpdateParams as RouteUpdateParams
+from .route_view_response import RouteViewResponse as RouteViewResponse
from .version_list_params import VersionListParams as VersionListParams
from .api_evaluation_metric import APIEvaluationMetric as APIEvaluationMetric
from .api_evaluation_prompt import APIEvaluationPrompt as APIEvaluationPrompt
@@ -14,9 +18,10 @@
from .api_key_list_response import APIKeyListResponse as APIKeyListResponse
from .api_key_update_params import APIKeyUpdateParams as APIKeyUpdateParams
from .api_star_metric_param import APIStarMetricParam as APIStarMetricParam
+from .route_delete_response import RouteDeleteResponse as RouteDeleteResponse
+from .route_update_response import RouteUpdateResponse as RouteUpdateResponse
from .version_list_response import VersionListResponse as VersionListResponse
from .version_update_params import VersionUpdateParams as VersionUpdateParams
-from .child_agent_add_params import ChildAgentAddParams as ChildAgentAddParams
from .function_create_params import FunctionCreateParams as FunctionCreateParams
from .function_update_params import FunctionUpdateParams as FunctionUpdateParams
from .api_key_create_response import APIKeyCreateResponse as APIKeyCreateResponse
@@ -24,15 +29,10 @@
from .api_key_update_response import APIKeyUpdateResponse as APIKeyUpdateResponse
from .version_update_response import VersionUpdateResponse as VersionUpdateResponse
from .api_evaluation_test_case import APIEvaluationTestCase as APIEvaluationTestCase
-from .child_agent_add_response import ChildAgentAddResponse as ChildAgentAddResponse
from .function_create_response import FunctionCreateResponse as FunctionCreateResponse
from .function_delete_response import FunctionDeleteResponse as FunctionDeleteResponse
from .function_update_response import FunctionUpdateResponse as FunctionUpdateResponse
-from .child_agent_update_params import ChildAgentUpdateParams as ChildAgentUpdateParams
-from .child_agent_view_response import ChildAgentViewResponse as ChildAgentViewResponse
from .api_key_regenerate_response import APIKeyRegenerateResponse as APIKeyRegenerateResponse
-from .child_agent_delete_response import ChildAgentDeleteResponse as ChildAgentDeleteResponse
-from .child_agent_update_response import ChildAgentUpdateResponse as ChildAgentUpdateResponse
from .api_evaluation_metric_result import APIEvaluationMetricResult as APIEvaluationMetricResult
from .evaluation_run_create_params import EvaluationRunCreateParams as EvaluationRunCreateParams
from .api_link_knowledge_base_output import APILinkKnowledgeBaseOutput as APILinkKnowledgeBaseOutput
diff --git a/src/do_gradientai/types/agents/child_agent_add_params.py b/src/do_gradientai/types/agents/route_add_params.py
similarity index 87%
rename from src/do_gradientai/types/agents/child_agent_add_params.py
rename to src/do_gradientai/types/agents/route_add_params.py
index 001baa6f..b4fcb417 100644
--- a/src/do_gradientai/types/agents/child_agent_add_params.py
+++ b/src/do_gradientai/types/agents/route_add_params.py
@@ -6,10 +6,10 @@
from ..._utils import PropertyInfo
-__all__ = ["ChildAgentAddParams"]
+__all__ = ["RouteAddParams"]
-class ChildAgentAddParams(TypedDict, total=False):
+class RouteAddParams(TypedDict, total=False):
path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]]
body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")]
diff --git a/src/do_gradientai/types/agents/child_agent_add_response.py b/src/do_gradientai/types/agents/route_add_response.py
similarity index 79%
rename from src/do_gradientai/types/agents/child_agent_add_response.py
rename to src/do_gradientai/types/agents/route_add_response.py
index baccec10..cd3bb16a 100644
--- a/src/do_gradientai/types/agents/child_agent_add_response.py
+++ b/src/do_gradientai/types/agents/route_add_response.py
@@ -4,10 +4,10 @@
from ..._models import BaseModel
-__all__ = ["ChildAgentAddResponse"]
+__all__ = ["RouteAddResponse"]
-class ChildAgentAddResponse(BaseModel):
+class RouteAddResponse(BaseModel):
child_agent_uuid: Optional[str] = None
parent_agent_uuid: Optional[str] = None
diff --git a/src/do_gradientai/types/agents/child_agent_delete_response.py b/src/do_gradientai/types/agents/route_delete_response.py
similarity index 74%
rename from src/do_gradientai/types/agents/child_agent_delete_response.py
rename to src/do_gradientai/types/agents/route_delete_response.py
index b50fb024..07105a62 100644
--- a/src/do_gradientai/types/agents/child_agent_delete_response.py
+++ b/src/do_gradientai/types/agents/route_delete_response.py
@@ -4,10 +4,10 @@
from ..._models import BaseModel
-__all__ = ["ChildAgentDeleteResponse"]
+__all__ = ["RouteDeleteResponse"]
-class ChildAgentDeleteResponse(BaseModel):
+class RouteDeleteResponse(BaseModel):
child_agent_uuid: Optional[str] = None
parent_agent_uuid: Optional[str] = None
diff --git a/src/do_gradientai/types/agents/child_agent_update_params.py b/src/do_gradientai/types/agents/route_update_params.py
similarity index 86%
rename from src/do_gradientai/types/agents/child_agent_update_params.py
rename to src/do_gradientai/types/agents/route_update_params.py
index 2f009a52..cb6d6391 100644
--- a/src/do_gradientai/types/agents/child_agent_update_params.py
+++ b/src/do_gradientai/types/agents/route_update_params.py
@@ -6,10 +6,10 @@
from ..._utils import PropertyInfo
-__all__ = ["ChildAgentUpdateParams"]
+__all__ = ["RouteUpdateParams"]
-class ChildAgentUpdateParams(TypedDict, total=False):
+class RouteUpdateParams(TypedDict, total=False):
path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]]
body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")]
diff --git a/src/do_gradientai/types/agents/child_agent_update_response.py b/src/do_gradientai/types/agents/route_update_response.py
similarity index 81%
rename from src/do_gradientai/types/agents/child_agent_update_response.py
rename to src/do_gradientai/types/agents/route_update_response.py
index 48a13c72..75e1eda5 100644
--- a/src/do_gradientai/types/agents/child_agent_update_response.py
+++ b/src/do_gradientai/types/agents/route_update_response.py
@@ -4,10 +4,10 @@
from ..._models import BaseModel
-__all__ = ["ChildAgentUpdateResponse"]
+__all__ = ["RouteUpdateResponse"]
-class ChildAgentUpdateResponse(BaseModel):
+class RouteUpdateResponse(BaseModel):
child_agent_uuid: Optional[str] = None
parent_agent_uuid: Optional[str] = None
diff --git a/src/do_gradientai/types/agents/child_agent_view_response.py b/src/do_gradientai/types/agents/route_view_response.py
similarity index 78%
rename from src/do_gradientai/types/agents/child_agent_view_response.py
rename to src/do_gradientai/types/agents/route_view_response.py
index ffbaef12..dd9af70b 100644
--- a/src/do_gradientai/types/agents/child_agent_view_response.py
+++ b/src/do_gradientai/types/agents/route_view_response.py
@@ -6,10 +6,10 @@
from ..._models import BaseModel
-__all__ = ["ChildAgentViewResponse"]
+__all__ = ["RouteViewResponse"]
-class ChildAgentViewResponse(BaseModel):
+class RouteViewResponse(BaseModel):
children: Optional[List["APIAgent"]] = None
diff --git a/src/do_gradientai/types/providers/__init__.py b/src/do_gradientai/types/model_providers/__init__.py
similarity index 100%
rename from src/do_gradientai/types/providers/__init__.py
rename to src/do_gradientai/types/model_providers/__init__.py
diff --git a/src/do_gradientai/types/providers/anthropic/__init__.py b/src/do_gradientai/types/model_providers/anthropic/__init__.py
similarity index 100%
rename from src/do_gradientai/types/providers/anthropic/__init__.py
rename to src/do_gradientai/types/model_providers/anthropic/__init__.py
diff --git a/src/do_gradientai/types/providers/anthropic/key_create_params.py b/src/do_gradientai/types/model_providers/anthropic/key_create_params.py
similarity index 100%
rename from src/do_gradientai/types/providers/anthropic/key_create_params.py
rename to src/do_gradientai/types/model_providers/anthropic/key_create_params.py
diff --git a/src/do_gradientai/types/providers/anthropic/key_create_response.py b/src/do_gradientai/types/model_providers/anthropic/key_create_response.py
similarity index 100%
rename from src/do_gradientai/types/providers/anthropic/key_create_response.py
rename to src/do_gradientai/types/model_providers/anthropic/key_create_response.py
diff --git a/src/do_gradientai/types/providers/anthropic/key_delete_response.py b/src/do_gradientai/types/model_providers/anthropic/key_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/providers/anthropic/key_delete_response.py
rename to src/do_gradientai/types/model_providers/anthropic/key_delete_response.py
diff --git a/src/do_gradientai/types/providers/anthropic/key_list_agents_params.py b/src/do_gradientai/types/model_providers/anthropic/key_list_agents_params.py
similarity index 100%
rename from src/do_gradientai/types/providers/anthropic/key_list_agents_params.py
rename to src/do_gradientai/types/model_providers/anthropic/key_list_agents_params.py
diff --git a/src/do_gradientai/types/providers/anthropic/key_list_agents_response.py b/src/do_gradientai/types/model_providers/anthropic/key_list_agents_response.py
similarity index 100%
rename from src/do_gradientai/types/providers/anthropic/key_list_agents_response.py
rename to src/do_gradientai/types/model_providers/anthropic/key_list_agents_response.py
diff --git a/src/do_gradientai/types/providers/anthropic/key_list_params.py b/src/do_gradientai/types/model_providers/anthropic/key_list_params.py
similarity index 100%
rename from src/do_gradientai/types/providers/anthropic/key_list_params.py
rename to src/do_gradientai/types/model_providers/anthropic/key_list_params.py
diff --git a/src/do_gradientai/types/providers/anthropic/key_list_response.py b/src/do_gradientai/types/model_providers/anthropic/key_list_response.py
similarity index 100%
rename from src/do_gradientai/types/providers/anthropic/key_list_response.py
rename to src/do_gradientai/types/model_providers/anthropic/key_list_response.py
diff --git a/src/do_gradientai/types/providers/anthropic/key_retrieve_response.py b/src/do_gradientai/types/model_providers/anthropic/key_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/providers/anthropic/key_retrieve_response.py
rename to src/do_gradientai/types/model_providers/anthropic/key_retrieve_response.py
diff --git a/src/do_gradientai/types/providers/anthropic/key_update_params.py b/src/do_gradientai/types/model_providers/anthropic/key_update_params.py
similarity index 100%
rename from src/do_gradientai/types/providers/anthropic/key_update_params.py
rename to src/do_gradientai/types/model_providers/anthropic/key_update_params.py
diff --git a/src/do_gradientai/types/providers/anthropic/key_update_response.py b/src/do_gradientai/types/model_providers/anthropic/key_update_response.py
similarity index 100%
rename from src/do_gradientai/types/providers/anthropic/key_update_response.py
rename to src/do_gradientai/types/model_providers/anthropic/key_update_response.py
diff --git a/src/do_gradientai/types/providers/openai/__init__.py b/src/do_gradientai/types/model_providers/openai/__init__.py
similarity index 100%
rename from src/do_gradientai/types/providers/openai/__init__.py
rename to src/do_gradientai/types/model_providers/openai/__init__.py
diff --git a/src/do_gradientai/types/providers/openai/key_create_params.py b/src/do_gradientai/types/model_providers/openai/key_create_params.py
similarity index 100%
rename from src/do_gradientai/types/providers/openai/key_create_params.py
rename to src/do_gradientai/types/model_providers/openai/key_create_params.py
diff --git a/src/do_gradientai/types/providers/openai/key_create_response.py b/src/do_gradientai/types/model_providers/openai/key_create_response.py
similarity index 100%
rename from src/do_gradientai/types/providers/openai/key_create_response.py
rename to src/do_gradientai/types/model_providers/openai/key_create_response.py
diff --git a/src/do_gradientai/types/providers/openai/key_delete_response.py b/src/do_gradientai/types/model_providers/openai/key_delete_response.py
similarity index 100%
rename from src/do_gradientai/types/providers/openai/key_delete_response.py
rename to src/do_gradientai/types/model_providers/openai/key_delete_response.py
diff --git a/src/do_gradientai/types/providers/openai/key_list_params.py b/src/do_gradientai/types/model_providers/openai/key_list_params.py
similarity index 100%
rename from src/do_gradientai/types/providers/openai/key_list_params.py
rename to src/do_gradientai/types/model_providers/openai/key_list_params.py
diff --git a/src/do_gradientai/types/providers/openai/key_list_response.py b/src/do_gradientai/types/model_providers/openai/key_list_response.py
similarity index 100%
rename from src/do_gradientai/types/providers/openai/key_list_response.py
rename to src/do_gradientai/types/model_providers/openai/key_list_response.py
diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py b/src/do_gradientai/types/model_providers/openai/key_retrieve_agents_params.py
similarity index 100%
rename from src/do_gradientai/types/providers/openai/key_retrieve_agents_params.py
rename to src/do_gradientai/types/model_providers/openai/key_retrieve_agents_params.py
diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py b/src/do_gradientai/types/model_providers/openai/key_retrieve_agents_response.py
similarity index 100%
rename from src/do_gradientai/types/providers/openai/key_retrieve_agents_response.py
rename to src/do_gradientai/types/model_providers/openai/key_retrieve_agents_response.py
diff --git a/src/do_gradientai/types/providers/openai/key_retrieve_response.py b/src/do_gradientai/types/model_providers/openai/key_retrieve_response.py
similarity index 100%
rename from src/do_gradientai/types/providers/openai/key_retrieve_response.py
rename to src/do_gradientai/types/model_providers/openai/key_retrieve_response.py
diff --git a/src/do_gradientai/types/providers/openai/key_update_params.py b/src/do_gradientai/types/model_providers/openai/key_update_params.py
similarity index 100%
rename from src/do_gradientai/types/providers/openai/key_update_params.py
rename to src/do_gradientai/types/model_providers/openai/key_update_params.py
diff --git a/src/do_gradientai/types/providers/openai/key_update_response.py b/src/do_gradientai/types/model_providers/openai/key_update_response.py
similarity index 100%
rename from src/do_gradientai/types/providers/openai/key_update_response.py
rename to src/do_gradientai/types/model_providers/openai/key_update_response.py
diff --git a/tests/api_resources/agents/test_child_agents.py b/tests/api_resources/agents/test_routes.py
similarity index 69%
rename from tests/api_resources/agents/test_child_agents.py
rename to tests/api_resources/agents/test_routes.py
index c5108463..e2e85ab8 100644
--- a/tests/api_resources/agents/test_child_agents.py
+++ b/tests/api_resources/agents/test_routes.py
@@ -10,31 +10,31 @@
from tests.utils import assert_matches_type
from do_gradientai import GradientAI, AsyncGradientAI
from do_gradientai.types.agents import (
- ChildAgentAddResponse,
- ChildAgentViewResponse,
- ChildAgentDeleteResponse,
- ChildAgentUpdateResponse,
+ RouteAddResponse,
+ RouteViewResponse,
+ RouteDeleteResponse,
+ RouteUpdateResponse,
)
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-class TestChildAgents:
+class TestRoutes:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@pytest.mark.skip()
@parametrize
def test_method_update(self, client: GradientAI) -> None:
- child_agent = client.agents.child_agents.update(
+ route = client.agents.routes.update(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="parent_agent_uuid",
)
- assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
+ assert_matches_type(RouteUpdateResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
def test_method_update_with_all_params(self, client: GradientAI) -> None:
- child_agent = client.agents.child_agents.update(
+ route = client.agents.routes.update(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="parent_agent_uuid",
body_child_agent_uuid="child_agent_uuid",
@@ -43,33 +43,33 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None:
route_name="route_name",
uuid="uuid",
)
- assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
+ assert_matches_type(RouteUpdateResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
def test_raw_response_update(self, client: GradientAI) -> None:
- response = client.agents.child_agents.with_raw_response.update(
+ response = client.agents.routes.with_raw_response.update(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="parent_agent_uuid",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- child_agent = response.parse()
- assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
+ route = response.parse()
+ assert_matches_type(RouteUpdateResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
def test_streaming_response_update(self, client: GradientAI) -> None:
- with client.agents.child_agents.with_streaming_response.update(
+ with client.agents.routes.with_streaming_response.update(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="parent_agent_uuid",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- child_agent = response.parse()
- assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
+ route = response.parse()
+ assert_matches_type(RouteUpdateResponse, route, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -79,13 +79,13 @@ def test_path_params_update(self, client: GradientAI) -> None:
with pytest.raises(
ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
):
- client.agents.child_agents.with_raw_response.update(
+ client.agents.routes.with_raw_response.update(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"):
- client.agents.child_agents.with_raw_response.update(
+ client.agents.routes.with_raw_response.update(
path_child_agent_uuid="",
path_parent_agent_uuid="parent_agent_uuid",
)
@@ -93,37 +93,37 @@ def test_path_params_update(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_delete(self, client: GradientAI) -> None:
- child_agent = client.agents.child_agents.delete(
+ route = client.agents.routes.delete(
child_agent_uuid="child_agent_uuid",
parent_agent_uuid="parent_agent_uuid",
)
- assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
+ assert_matches_type(RouteDeleteResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
def test_raw_response_delete(self, client: GradientAI) -> None:
- response = client.agents.child_agents.with_raw_response.delete(
+ response = client.agents.routes.with_raw_response.delete(
child_agent_uuid="child_agent_uuid",
parent_agent_uuid="parent_agent_uuid",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- child_agent = response.parse()
- assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
+ route = response.parse()
+ assert_matches_type(RouteDeleteResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
def test_streaming_response_delete(self, client: GradientAI) -> None:
- with client.agents.child_agents.with_streaming_response.delete(
+ with client.agents.routes.with_streaming_response.delete(
child_agent_uuid="child_agent_uuid",
parent_agent_uuid="parent_agent_uuid",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- child_agent = response.parse()
- assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
+ route = response.parse()
+ assert_matches_type(RouteDeleteResponse, route, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -131,13 +131,13 @@ def test_streaming_response_delete(self, client: GradientAI) -> None:
@parametrize
def test_path_params_delete(self, client: GradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"):
- client.agents.child_agents.with_raw_response.delete(
+ client.agents.routes.with_raw_response.delete(
child_agent_uuid="child_agent_uuid",
parent_agent_uuid="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"):
- client.agents.child_agents.with_raw_response.delete(
+ client.agents.routes.with_raw_response.delete(
child_agent_uuid="",
parent_agent_uuid="parent_agent_uuid",
)
@@ -145,16 +145,16 @@ def test_path_params_delete(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_add(self, client: GradientAI) -> None:
- child_agent = client.agents.child_agents.add(
+ route = client.agents.routes.add(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="parent_agent_uuid",
)
- assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
+ assert_matches_type(RouteAddResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
def test_method_add_with_all_params(self, client: GradientAI) -> None:
- child_agent = client.agents.child_agents.add(
+ route = client.agents.routes.add(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="parent_agent_uuid",
body_child_agent_uuid="child_agent_uuid",
@@ -162,33 +162,33 @@ def test_method_add_with_all_params(self, client: GradientAI) -> None:
body_parent_agent_uuid="parent_agent_uuid",
route_name="route_name",
)
- assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
+ assert_matches_type(RouteAddResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
def test_raw_response_add(self, client: GradientAI) -> None:
- response = client.agents.child_agents.with_raw_response.add(
+ response = client.agents.routes.with_raw_response.add(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="parent_agent_uuid",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- child_agent = response.parse()
- assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
+ route = response.parse()
+ assert_matches_type(RouteAddResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
def test_streaming_response_add(self, client: GradientAI) -> None:
- with client.agents.child_agents.with_streaming_response.add(
+ with client.agents.routes.with_streaming_response.add(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="parent_agent_uuid",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- child_agent = response.parse()
- assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
+ route = response.parse()
+ assert_matches_type(RouteAddResponse, route, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -198,13 +198,13 @@ def test_path_params_add(self, client: GradientAI) -> None:
with pytest.raises(
ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
):
- client.agents.child_agents.with_raw_response.add(
+ client.agents.routes.with_raw_response.add(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"):
- client.agents.child_agents.with_raw_response.add(
+ client.agents.routes.with_raw_response.add(
path_child_agent_uuid="",
path_parent_agent_uuid="parent_agent_uuid",
)
@@ -212,34 +212,34 @@ def test_path_params_add(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_view(self, client: GradientAI) -> None:
- child_agent = client.agents.child_agents.view(
+ route = client.agents.routes.view(
"uuid",
)
- assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
+ assert_matches_type(RouteViewResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
def test_raw_response_view(self, client: GradientAI) -> None:
- response = client.agents.child_agents.with_raw_response.view(
+ response = client.agents.routes.with_raw_response.view(
"uuid",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- child_agent = response.parse()
- assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
+ route = response.parse()
+ assert_matches_type(RouteViewResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
def test_streaming_response_view(self, client: GradientAI) -> None:
- with client.agents.child_agents.with_streaming_response.view(
+ with client.agents.routes.with_streaming_response.view(
"uuid",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- child_agent = response.parse()
- assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
+ route = response.parse()
+ assert_matches_type(RouteViewResponse, route, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -247,12 +247,12 @@ def test_streaming_response_view(self, client: GradientAI) -> None:
@parametrize
def test_path_params_view(self, client: GradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
- client.agents.child_agents.with_raw_response.view(
+ client.agents.routes.with_raw_response.view(
"",
)
-class TestAsyncChildAgents:
+class TestAsyncRoutes:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@@ -260,16 +260,16 @@ class TestAsyncChildAgents:
@pytest.mark.skip()
@parametrize
async def test_method_update(self, async_client: AsyncGradientAI) -> None:
- child_agent = await async_client.agents.child_agents.update(
+ route = await async_client.agents.routes.update(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="parent_agent_uuid",
)
- assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
+ assert_matches_type(RouteUpdateResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
- child_agent = await async_client.agents.child_agents.update(
+ route = await async_client.agents.routes.update(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="parent_agent_uuid",
body_child_agent_uuid="child_agent_uuid",
@@ -278,33 +278,33 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI
route_name="route_name",
uuid="uuid",
)
- assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
+ assert_matches_type(RouteUpdateResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.agents.child_agents.with_raw_response.update(
+ response = await async_client.agents.routes.with_raw_response.update(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="parent_agent_uuid",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- child_agent = await response.parse()
- assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
+ route = await response.parse()
+ assert_matches_type(RouteUpdateResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
- async with async_client.agents.child_agents.with_streaming_response.update(
+ async with async_client.agents.routes.with_streaming_response.update(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="parent_agent_uuid",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- child_agent = await response.parse()
- assert_matches_type(ChildAgentUpdateResponse, child_agent, path=["response"])
+ route = await response.parse()
+ assert_matches_type(RouteUpdateResponse, route, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -314,13 +314,13 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(
ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
):
- await async_client.agents.child_agents.with_raw_response.update(
+ await async_client.agents.routes.with_raw_response.update(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"):
- await async_client.agents.child_agents.with_raw_response.update(
+ await async_client.agents.routes.with_raw_response.update(
path_child_agent_uuid="",
path_parent_agent_uuid="parent_agent_uuid",
)
@@ -328,37 +328,37 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
- child_agent = await async_client.agents.child_agents.delete(
+ route = await async_client.agents.routes.delete(
child_agent_uuid="child_agent_uuid",
parent_agent_uuid="parent_agent_uuid",
)
- assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
+ assert_matches_type(RouteDeleteResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.agents.child_agents.with_raw_response.delete(
+ response = await async_client.agents.routes.with_raw_response.delete(
child_agent_uuid="child_agent_uuid",
parent_agent_uuid="parent_agent_uuid",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- child_agent = await response.parse()
- assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
+ route = await response.parse()
+ assert_matches_type(RouteDeleteResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
- async with async_client.agents.child_agents.with_streaming_response.delete(
+ async with async_client.agents.routes.with_streaming_response.delete(
child_agent_uuid="child_agent_uuid",
parent_agent_uuid="parent_agent_uuid",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- child_agent = await response.parse()
- assert_matches_type(ChildAgentDeleteResponse, child_agent, path=["response"])
+ route = await response.parse()
+ assert_matches_type(RouteDeleteResponse, route, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -366,13 +366,13 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) ->
@parametrize
async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"):
- await async_client.agents.child_agents.with_raw_response.delete(
+ await async_client.agents.routes.with_raw_response.delete(
child_agent_uuid="child_agent_uuid",
parent_agent_uuid="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"):
- await async_client.agents.child_agents.with_raw_response.delete(
+ await async_client.agents.routes.with_raw_response.delete(
child_agent_uuid="",
parent_agent_uuid="parent_agent_uuid",
)
@@ -380,16 +380,16 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_method_add(self, async_client: AsyncGradientAI) -> None:
- child_agent = await async_client.agents.child_agents.add(
+ route = await async_client.agents.routes.add(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="parent_agent_uuid",
)
- assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
+ assert_matches_type(RouteAddResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) -> None:
- child_agent = await async_client.agents.child_agents.add(
+ route = await async_client.agents.routes.add(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="parent_agent_uuid",
body_child_agent_uuid="child_agent_uuid",
@@ -397,33 +397,33 @@ async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) -
body_parent_agent_uuid="parent_agent_uuid",
route_name="route_name",
)
- assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
+ assert_matches_type(RouteAddResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.agents.child_agents.with_raw_response.add(
+ response = await async_client.agents.routes.with_raw_response.add(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="parent_agent_uuid",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- child_agent = await response.parse()
- assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
+ route = await response.parse()
+ assert_matches_type(RouteAddResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None:
- async with async_client.agents.child_agents.with_streaming_response.add(
+ async with async_client.agents.routes.with_streaming_response.add(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="parent_agent_uuid",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- child_agent = await response.parse()
- assert_matches_type(ChildAgentAddResponse, child_agent, path=["response"])
+ route = await response.parse()
+ assert_matches_type(RouteAddResponse, route, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -433,13 +433,13 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(
ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''"
):
- await async_client.agents.child_agents.with_raw_response.add(
+ await async_client.agents.routes.with_raw_response.add(
path_child_agent_uuid="child_agent_uuid",
path_parent_agent_uuid="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"):
- await async_client.agents.child_agents.with_raw_response.add(
+ await async_client.agents.routes.with_raw_response.add(
path_child_agent_uuid="",
path_parent_agent_uuid="parent_agent_uuid",
)
@@ -447,34 +447,34 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_method_view(self, async_client: AsyncGradientAI) -> None:
- child_agent = await async_client.agents.child_agents.view(
+ route = await async_client.agents.routes.view(
"uuid",
)
- assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
+ assert_matches_type(RouteViewResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_raw_response_view(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.agents.child_agents.with_raw_response.view(
+ response = await async_client.agents.routes.with_raw_response.view(
"uuid",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- child_agent = await response.parse()
- assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
+ route = await response.parse()
+ assert_matches_type(RouteViewResponse, route, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_streaming_response_view(self, async_client: AsyncGradientAI) -> None:
- async with async_client.agents.child_agents.with_streaming_response.view(
+ async with async_client.agents.routes.with_streaming_response.view(
"uuid",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- child_agent = await response.parse()
- assert_matches_type(ChildAgentViewResponse, child_agent, path=["response"])
+ route = await response.parse()
+ assert_matches_type(RouteViewResponse, route, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -482,6 +482,6 @@ async def test_streaming_response_view(self, async_client: AsyncGradientAI) -> N
@parametrize
async def test_path_params_view(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
- await async_client.agents.child_agents.with_raw_response.view(
+ await async_client.agents.routes.with_raw_response.view(
"",
)
diff --git a/tests/api_resources/providers/__init__.py b/tests/api_resources/model_providers/__init__.py
similarity index 100%
rename from tests/api_resources/providers/__init__.py
rename to tests/api_resources/model_providers/__init__.py
diff --git a/tests/api_resources/providers/anthropic/__init__.py b/tests/api_resources/model_providers/anthropic/__init__.py
similarity index 100%
rename from tests/api_resources/providers/anthropic/__init__.py
rename to tests/api_resources/model_providers/anthropic/__init__.py
diff --git a/tests/api_resources/providers/anthropic/test_keys.py b/tests/api_resources/model_providers/anthropic/test_keys.py
similarity index 80%
rename from tests/api_resources/providers/anthropic/test_keys.py
rename to tests/api_resources/model_providers/anthropic/test_keys.py
index 7aa595f7..b6ba0e9a 100644
--- a/tests/api_resources/providers/anthropic/test_keys.py
+++ b/tests/api_resources/model_providers/anthropic/test_keys.py
@@ -9,7 +9,7 @@
from tests.utils import assert_matches_type
from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.providers.anthropic import (
+from do_gradientai.types.model_providers.anthropic import (
KeyListResponse,
KeyCreateResponse,
KeyDeleteResponse,
@@ -27,13 +27,13 @@ class TestKeys:
@pytest.mark.skip()
@parametrize
def test_method_create(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.create()
+ key = client.model_providers.anthropic.keys.create()
assert_matches_type(KeyCreateResponse, key, path=["response"])
@pytest.mark.skip()
@parametrize
def test_method_create_with_all_params(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.create(
+ key = client.model_providers.anthropic.keys.create(
api_key="api_key",
name="name",
)
@@ -42,7 +42,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_create(self, client: GradientAI) -> None:
- response = client.providers.anthropic.keys.with_raw_response.create()
+ response = client.model_providers.anthropic.keys.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -52,7 +52,7 @@ def test_raw_response_create(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_create(self, client: GradientAI) -> None:
- with client.providers.anthropic.keys.with_streaming_response.create() as response:
+ with client.model_providers.anthropic.keys.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -64,7 +64,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_retrieve(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.retrieve(
+ key = client.model_providers.anthropic.keys.retrieve(
"api_key_uuid",
)
assert_matches_type(KeyRetrieveResponse, key, path=["response"])
@@ -72,7 +72,7 @@ def test_method_retrieve(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_retrieve(self, client: GradientAI) -> None:
- response = client.providers.anthropic.keys.with_raw_response.retrieve(
+ response = client.model_providers.anthropic.keys.with_raw_response.retrieve(
"api_key_uuid",
)
@@ -84,7 +84,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_retrieve(self, client: GradientAI) -> None:
- with client.providers.anthropic.keys.with_streaming_response.retrieve(
+ with client.model_providers.anthropic.keys.with_streaming_response.retrieve(
"api_key_uuid",
) as response:
assert not response.is_closed
@@ -99,14 +99,14 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
@parametrize
def test_path_params_retrieve(self, client: GradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- client.providers.anthropic.keys.with_raw_response.retrieve(
+ client.model_providers.anthropic.keys.with_raw_response.retrieve(
"",
)
@pytest.mark.skip()
@parametrize
def test_method_update(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.update(
+ key = client.model_providers.anthropic.keys.update(
path_api_key_uuid="api_key_uuid",
)
assert_matches_type(KeyUpdateResponse, key, path=["response"])
@@ -114,7 +114,7 @@ def test_method_update(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_update_with_all_params(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.update(
+ key = client.model_providers.anthropic.keys.update(
path_api_key_uuid="api_key_uuid",
api_key="api_key",
body_api_key_uuid="api_key_uuid",
@@ -125,7 +125,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_update(self, client: GradientAI) -> None:
- response = client.providers.anthropic.keys.with_raw_response.update(
+ response = client.model_providers.anthropic.keys.with_raw_response.update(
path_api_key_uuid="api_key_uuid",
)
@@ -137,7 +137,7 @@ def test_raw_response_update(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_update(self, client: GradientAI) -> None:
- with client.providers.anthropic.keys.with_streaming_response.update(
+ with client.model_providers.anthropic.keys.with_streaming_response.update(
path_api_key_uuid="api_key_uuid",
) as response:
assert not response.is_closed
@@ -152,20 +152,20 @@ def test_streaming_response_update(self, client: GradientAI) -> None:
@parametrize
def test_path_params_update(self, client: GradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
- client.providers.anthropic.keys.with_raw_response.update(
+ client.model_providers.anthropic.keys.with_raw_response.update(
path_api_key_uuid="",
)
@pytest.mark.skip()
@parametrize
def test_method_list(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.list()
+ key = client.model_providers.anthropic.keys.list()
assert_matches_type(KeyListResponse, key, path=["response"])
@pytest.mark.skip()
@parametrize
def test_method_list_with_all_params(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.list(
+ key = client.model_providers.anthropic.keys.list(
page=0,
per_page=0,
)
@@ -174,7 +174,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_list(self, client: GradientAI) -> None:
- response = client.providers.anthropic.keys.with_raw_response.list()
+ response = client.model_providers.anthropic.keys.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -184,7 +184,7 @@ def test_raw_response_list(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_list(self, client: GradientAI) -> None:
- with client.providers.anthropic.keys.with_streaming_response.list() as response:
+ with client.model_providers.anthropic.keys.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -196,7 +196,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_delete(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.delete(
+ key = client.model_providers.anthropic.keys.delete(
"api_key_uuid",
)
assert_matches_type(KeyDeleteResponse, key, path=["response"])
@@ -204,7 +204,7 @@ def test_method_delete(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_delete(self, client: GradientAI) -> None:
- response = client.providers.anthropic.keys.with_raw_response.delete(
+ response = client.model_providers.anthropic.keys.with_raw_response.delete(
"api_key_uuid",
)
@@ -216,7 +216,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_delete(self, client: GradientAI) -> None:
- with client.providers.anthropic.keys.with_streaming_response.delete(
+ with client.model_providers.anthropic.keys.with_streaming_response.delete(
"api_key_uuid",
) as response:
assert not response.is_closed
@@ -231,14 +231,14 @@ def test_streaming_response_delete(self, client: GradientAI) -> None:
@parametrize
def test_path_params_delete(self, client: GradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- client.providers.anthropic.keys.with_raw_response.delete(
+ client.model_providers.anthropic.keys.with_raw_response.delete(
"",
)
@pytest.mark.skip()
@parametrize
def test_method_list_agents(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.list_agents(
+ key = client.model_providers.anthropic.keys.list_agents(
uuid="uuid",
)
assert_matches_type(KeyListAgentsResponse, key, path=["response"])
@@ -246,7 +246,7 @@ def test_method_list_agents(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_list_agents_with_all_params(self, client: GradientAI) -> None:
- key = client.providers.anthropic.keys.list_agents(
+ key = client.model_providers.anthropic.keys.list_agents(
uuid="uuid",
page=0,
per_page=0,
@@ -256,7 +256,7 @@ def test_method_list_agents_with_all_params(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_list_agents(self, client: GradientAI) -> None:
- response = client.providers.anthropic.keys.with_raw_response.list_agents(
+ response = client.model_providers.anthropic.keys.with_raw_response.list_agents(
uuid="uuid",
)
@@ -268,7 +268,7 @@ def test_raw_response_list_agents(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_list_agents(self, client: GradientAI) -> None:
- with client.providers.anthropic.keys.with_streaming_response.list_agents(
+ with client.model_providers.anthropic.keys.with_streaming_response.list_agents(
uuid="uuid",
) as response:
assert not response.is_closed
@@ -283,7 +283,7 @@ def test_streaming_response_list_agents(self, client: GradientAI) -> None:
@parametrize
def test_path_params_list_agents(self, client: GradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
- client.providers.anthropic.keys.with_raw_response.list_agents(
+ client.model_providers.anthropic.keys.with_raw_response.list_agents(
uuid="",
)
@@ -296,13 +296,13 @@ class TestAsyncKeys:
@pytest.mark.skip()
@parametrize
async def test_method_create(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.create()
+ key = await async_client.model_providers.anthropic.keys.create()
assert_matches_type(KeyCreateResponse, key, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.create(
+ key = await async_client.model_providers.anthropic.keys.create(
api_key="api_key",
name="name",
)
@@ -311,7 +311,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI
@pytest.mark.skip()
@parametrize
async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.anthropic.keys.with_raw_response.create()
+ response = await async_client.model_providers.anthropic.keys.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -321,7 +321,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.anthropic.keys.with_streaming_response.create() as response:
+ async with async_client.model_providers.anthropic.keys.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -333,7 +333,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) ->
@pytest.mark.skip()
@parametrize
async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.retrieve(
+ key = await async_client.model_providers.anthropic.keys.retrieve(
"api_key_uuid",
)
assert_matches_type(KeyRetrieveResponse, key, path=["response"])
@@ -341,7 +341,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.anthropic.keys.with_raw_response.retrieve(
+ response = await async_client.model_providers.anthropic.keys.with_raw_response.retrieve(
"api_key_uuid",
)
@@ -353,7 +353,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non
@pytest.mark.skip()
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.anthropic.keys.with_streaming_response.retrieve(
+ async with async_client.model_providers.anthropic.keys.with_streaming_response.retrieve(
"api_key_uuid",
) as response:
assert not response.is_closed
@@ -368,14 +368,14 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI)
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- await async_client.providers.anthropic.keys.with_raw_response.retrieve(
+ await async_client.model_providers.anthropic.keys.with_raw_response.retrieve(
"",
)
@pytest.mark.skip()
@parametrize
async def test_method_update(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.update(
+ key = await async_client.model_providers.anthropic.keys.update(
path_api_key_uuid="api_key_uuid",
)
assert_matches_type(KeyUpdateResponse, key, path=["response"])
@@ -383,7 +383,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.update(
+ key = await async_client.model_providers.anthropic.keys.update(
path_api_key_uuid="api_key_uuid",
api_key="api_key",
body_api_key_uuid="api_key_uuid",
@@ -394,7 +394,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI
@pytest.mark.skip()
@parametrize
async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.anthropic.keys.with_raw_response.update(
+ response = await async_client.model_providers.anthropic.keys.with_raw_response.update(
path_api_key_uuid="api_key_uuid",
)
@@ -406,7 +406,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.anthropic.keys.with_streaming_response.update(
+ async with async_client.model_providers.anthropic.keys.with_streaming_response.update(
path_api_key_uuid="api_key_uuid",
) as response:
assert not response.is_closed
@@ -421,20 +421,20 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) ->
@parametrize
async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
- await async_client.providers.anthropic.keys.with_raw_response.update(
+ await async_client.model_providers.anthropic.keys.with_raw_response.update(
path_api_key_uuid="",
)
@pytest.mark.skip()
@parametrize
async def test_method_list(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.list()
+ key = await async_client.model_providers.anthropic.keys.list()
assert_matches_type(KeyListResponse, key, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.list(
+ key = await async_client.model_providers.anthropic.keys.list(
page=0,
per_page=0,
)
@@ -443,7 +443,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI)
@pytest.mark.skip()
@parametrize
async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.anthropic.keys.with_raw_response.list()
+ response = await async_client.model_providers.anthropic.keys.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -453,7 +453,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.anthropic.keys.with_streaming_response.list() as response:
+ async with async_client.model_providers.anthropic.keys.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -465,7 +465,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N
@pytest.mark.skip()
@parametrize
async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.delete(
+ key = await async_client.model_providers.anthropic.keys.delete(
"api_key_uuid",
)
assert_matches_type(KeyDeleteResponse, key, path=["response"])
@@ -473,7 +473,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.anthropic.keys.with_raw_response.delete(
+ response = await async_client.model_providers.anthropic.keys.with_raw_response.delete(
"api_key_uuid",
)
@@ -485,7 +485,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.anthropic.keys.with_streaming_response.delete(
+ async with async_client.model_providers.anthropic.keys.with_streaming_response.delete(
"api_key_uuid",
) as response:
assert not response.is_closed
@@ -500,14 +500,14 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) ->
@parametrize
async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- await async_client.providers.anthropic.keys.with_raw_response.delete(
+ await async_client.model_providers.anthropic.keys.with_raw_response.delete(
"",
)
@pytest.mark.skip()
@parametrize
async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.list_agents(
+ key = await async_client.model_providers.anthropic.keys.list_agents(
uuid="uuid",
)
assert_matches_type(KeyListAgentsResponse, key, path=["response"])
@@ -515,7 +515,7 @@ async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.anthropic.keys.list_agents(
+ key = await async_client.model_providers.anthropic.keys.list_agents(
uuid="uuid",
page=0,
per_page=0,
@@ -525,7 +525,7 @@ async def test_method_list_agents_with_all_params(self, async_client: AsyncGradi
@pytest.mark.skip()
@parametrize
async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.anthropic.keys.with_raw_response.list_agents(
+ response = await async_client.model_providers.anthropic.keys.with_raw_response.list_agents(
uuid="uuid",
)
@@ -537,7 +537,7 @@ async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) ->
@pytest.mark.skip()
@parametrize
async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.anthropic.keys.with_streaming_response.list_agents(
+ async with async_client.model_providers.anthropic.keys.with_streaming_response.list_agents(
uuid="uuid",
) as response:
assert not response.is_closed
@@ -552,6 +552,6 @@ async def test_streaming_response_list_agents(self, async_client: AsyncGradientA
@parametrize
async def test_path_params_list_agents(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
- await async_client.providers.anthropic.keys.with_raw_response.list_agents(
+ await async_client.model_providers.anthropic.keys.with_raw_response.list_agents(
uuid="",
)
diff --git a/tests/api_resources/providers/openai/__init__.py b/tests/api_resources/model_providers/openai/__init__.py
similarity index 100%
rename from tests/api_resources/providers/openai/__init__.py
rename to tests/api_resources/model_providers/openai/__init__.py
diff --git a/tests/api_resources/providers/openai/test_keys.py b/tests/api_resources/model_providers/openai/test_keys.py
similarity index 81%
rename from tests/api_resources/providers/openai/test_keys.py
rename to tests/api_resources/model_providers/openai/test_keys.py
index 714dc4bd..b398f5cc 100644
--- a/tests/api_resources/providers/openai/test_keys.py
+++ b/tests/api_resources/model_providers/openai/test_keys.py
@@ -9,7 +9,7 @@
from tests.utils import assert_matches_type
from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types.providers.openai import (
+from do_gradientai.types.model_providers.openai import (
KeyListResponse,
KeyCreateResponse,
KeyDeleteResponse,
@@ -27,13 +27,13 @@ class TestKeys:
@pytest.mark.skip()
@parametrize
def test_method_create(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.create()
+ key = client.model_providers.openai.keys.create()
assert_matches_type(KeyCreateResponse, key, path=["response"])
@pytest.mark.skip()
@parametrize
def test_method_create_with_all_params(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.create(
+ key = client.model_providers.openai.keys.create(
api_key="api_key",
name="name",
)
@@ -42,7 +42,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_create(self, client: GradientAI) -> None:
- response = client.providers.openai.keys.with_raw_response.create()
+ response = client.model_providers.openai.keys.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -52,7 +52,7 @@ def test_raw_response_create(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_create(self, client: GradientAI) -> None:
- with client.providers.openai.keys.with_streaming_response.create() as response:
+ with client.model_providers.openai.keys.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -64,7 +64,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_retrieve(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.retrieve(
+ key = client.model_providers.openai.keys.retrieve(
"api_key_uuid",
)
assert_matches_type(KeyRetrieveResponse, key, path=["response"])
@@ -72,7 +72,7 @@ def test_method_retrieve(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_retrieve(self, client: GradientAI) -> None:
- response = client.providers.openai.keys.with_raw_response.retrieve(
+ response = client.model_providers.openai.keys.with_raw_response.retrieve(
"api_key_uuid",
)
@@ -84,7 +84,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_retrieve(self, client: GradientAI) -> None:
- with client.providers.openai.keys.with_streaming_response.retrieve(
+ with client.model_providers.openai.keys.with_streaming_response.retrieve(
"api_key_uuid",
) as response:
assert not response.is_closed
@@ -99,14 +99,14 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
@parametrize
def test_path_params_retrieve(self, client: GradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- client.providers.openai.keys.with_raw_response.retrieve(
+ client.model_providers.openai.keys.with_raw_response.retrieve(
"",
)
@pytest.mark.skip()
@parametrize
def test_method_update(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.update(
+ key = client.model_providers.openai.keys.update(
path_api_key_uuid="api_key_uuid",
)
assert_matches_type(KeyUpdateResponse, key, path=["response"])
@@ -114,7 +114,7 @@ def test_method_update(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_update_with_all_params(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.update(
+ key = client.model_providers.openai.keys.update(
path_api_key_uuid="api_key_uuid",
api_key="api_key",
body_api_key_uuid="api_key_uuid",
@@ -125,7 +125,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_update(self, client: GradientAI) -> None:
- response = client.providers.openai.keys.with_raw_response.update(
+ response = client.model_providers.openai.keys.with_raw_response.update(
path_api_key_uuid="api_key_uuid",
)
@@ -137,7 +137,7 @@ def test_raw_response_update(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_update(self, client: GradientAI) -> None:
- with client.providers.openai.keys.with_streaming_response.update(
+ with client.model_providers.openai.keys.with_streaming_response.update(
path_api_key_uuid="api_key_uuid",
) as response:
assert not response.is_closed
@@ -152,20 +152,20 @@ def test_streaming_response_update(self, client: GradientAI) -> None:
@parametrize
def test_path_params_update(self, client: GradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
- client.providers.openai.keys.with_raw_response.update(
+ client.model_providers.openai.keys.with_raw_response.update(
path_api_key_uuid="",
)
@pytest.mark.skip()
@parametrize
def test_method_list(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.list()
+ key = client.model_providers.openai.keys.list()
assert_matches_type(KeyListResponse, key, path=["response"])
@pytest.mark.skip()
@parametrize
def test_method_list_with_all_params(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.list(
+ key = client.model_providers.openai.keys.list(
page=0,
per_page=0,
)
@@ -174,7 +174,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_list(self, client: GradientAI) -> None:
- response = client.providers.openai.keys.with_raw_response.list()
+ response = client.model_providers.openai.keys.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -184,7 +184,7 @@ def test_raw_response_list(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_list(self, client: GradientAI) -> None:
- with client.providers.openai.keys.with_streaming_response.list() as response:
+ with client.model_providers.openai.keys.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -196,7 +196,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_delete(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.delete(
+ key = client.model_providers.openai.keys.delete(
"api_key_uuid",
)
assert_matches_type(KeyDeleteResponse, key, path=["response"])
@@ -204,7 +204,7 @@ def test_method_delete(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_raw_response_delete(self, client: GradientAI) -> None:
- response = client.providers.openai.keys.with_raw_response.delete(
+ response = client.model_providers.openai.keys.with_raw_response.delete(
"api_key_uuid",
)
@@ -216,7 +216,7 @@ def test_raw_response_delete(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_delete(self, client: GradientAI) -> None:
- with client.providers.openai.keys.with_streaming_response.delete(
+ with client.model_providers.openai.keys.with_streaming_response.delete(
"api_key_uuid",
) as response:
assert not response.is_closed
@@ -231,14 +231,14 @@ def test_streaming_response_delete(self, client: GradientAI) -> None:
@parametrize
def test_path_params_delete(self, client: GradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- client.providers.openai.keys.with_raw_response.delete(
+ client.model_providers.openai.keys.with_raw_response.delete(
"",
)
@pytest.mark.skip()
@parametrize
def test_method_retrieve_agents(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.retrieve_agents(
+ key = client.model_providers.openai.keys.retrieve_agents(
uuid="uuid",
)
assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
@@ -246,7 +246,7 @@ def test_method_retrieve_agents(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> None:
- key = client.providers.openai.keys.retrieve_agents(
+ key = client.model_providers.openai.keys.retrieve_agents(
uuid="uuid",
page=0,
per_page=0,
@@ -256,7 +256,7 @@ def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> Non
@pytest.mark.skip()
@parametrize
def test_raw_response_retrieve_agents(self, client: GradientAI) -> None:
- response = client.providers.openai.keys.with_raw_response.retrieve_agents(
+ response = client.model_providers.openai.keys.with_raw_response.retrieve_agents(
uuid="uuid",
)
@@ -268,7 +268,7 @@ def test_raw_response_retrieve_agents(self, client: GradientAI) -> None:
@pytest.mark.skip()
@parametrize
def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None:
- with client.providers.openai.keys.with_streaming_response.retrieve_agents(
+ with client.model_providers.openai.keys.with_streaming_response.retrieve_agents(
uuid="uuid",
) as response:
assert not response.is_closed
@@ -283,7 +283,7 @@ def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None:
@parametrize
def test_path_params_retrieve_agents(self, client: GradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
- client.providers.openai.keys.with_raw_response.retrieve_agents(
+ client.model_providers.openai.keys.with_raw_response.retrieve_agents(
uuid="",
)
@@ -296,13 +296,13 @@ class TestAsyncKeys:
@pytest.mark.skip()
@parametrize
async def test_method_create(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.create()
+ key = await async_client.model_providers.openai.keys.create()
assert_matches_type(KeyCreateResponse, key, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.create(
+ key = await async_client.model_providers.openai.keys.create(
api_key="api_key",
name="name",
)
@@ -311,7 +311,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI
@pytest.mark.skip()
@parametrize
async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.openai.keys.with_raw_response.create()
+ response = await async_client.model_providers.openai.keys.with_raw_response.create()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -321,7 +321,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.openai.keys.with_streaming_response.create() as response:
+ async with async_client.model_providers.openai.keys.with_streaming_response.create() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -333,7 +333,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) ->
@pytest.mark.skip()
@parametrize
async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.retrieve(
+ key = await async_client.model_providers.openai.keys.retrieve(
"api_key_uuid",
)
assert_matches_type(KeyRetrieveResponse, key, path=["response"])
@@ -341,7 +341,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.openai.keys.with_raw_response.retrieve(
+ response = await async_client.model_providers.openai.keys.with_raw_response.retrieve(
"api_key_uuid",
)
@@ -353,7 +353,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non
@pytest.mark.skip()
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.openai.keys.with_streaming_response.retrieve(
+ async with async_client.model_providers.openai.keys.with_streaming_response.retrieve(
"api_key_uuid",
) as response:
assert not response.is_closed
@@ -368,14 +368,14 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI)
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- await async_client.providers.openai.keys.with_raw_response.retrieve(
+ await async_client.model_providers.openai.keys.with_raw_response.retrieve(
"",
)
@pytest.mark.skip()
@parametrize
async def test_method_update(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.update(
+ key = await async_client.model_providers.openai.keys.update(
path_api_key_uuid="api_key_uuid",
)
assert_matches_type(KeyUpdateResponse, key, path=["response"])
@@ -383,7 +383,7 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.update(
+ key = await async_client.model_providers.openai.keys.update(
path_api_key_uuid="api_key_uuid",
api_key="api_key",
body_api_key_uuid="api_key_uuid",
@@ -394,7 +394,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI
@pytest.mark.skip()
@parametrize
async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.openai.keys.with_raw_response.update(
+ response = await async_client.model_providers.openai.keys.with_raw_response.update(
path_api_key_uuid="api_key_uuid",
)
@@ -406,7 +406,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.openai.keys.with_streaming_response.update(
+ async with async_client.model_providers.openai.keys.with_streaming_response.update(
path_api_key_uuid="api_key_uuid",
) as response:
assert not response.is_closed
@@ -421,20 +421,20 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) ->
@parametrize
async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"):
- await async_client.providers.openai.keys.with_raw_response.update(
+ await async_client.model_providers.openai.keys.with_raw_response.update(
path_api_key_uuid="",
)
@pytest.mark.skip()
@parametrize
async def test_method_list(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.list()
+ key = await async_client.model_providers.openai.keys.list()
assert_matches_type(KeyListResponse, key, path=["response"])
@pytest.mark.skip()
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.list(
+ key = await async_client.model_providers.openai.keys.list(
page=0,
per_page=0,
)
@@ -443,7 +443,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI)
@pytest.mark.skip()
@parametrize
async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.openai.keys.with_raw_response.list()
+ response = await async_client.model_providers.openai.keys.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -453,7 +453,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.openai.keys.with_streaming_response.list() as response:
+ async with async_client.model_providers.openai.keys.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -465,7 +465,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N
@pytest.mark.skip()
@parametrize
async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.delete(
+ key = await async_client.model_providers.openai.keys.delete(
"api_key_uuid",
)
assert_matches_type(KeyDeleteResponse, key, path=["response"])
@@ -473,7 +473,7 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.openai.keys.with_raw_response.delete(
+ response = await async_client.model_providers.openai.keys.with_raw_response.delete(
"api_key_uuid",
)
@@ -485,7 +485,7 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
@pytest.mark.skip()
@parametrize
async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.openai.keys.with_streaming_response.delete(
+ async with async_client.model_providers.openai.keys.with_streaming_response.delete(
"api_key_uuid",
) as response:
assert not response.is_closed
@@ -500,14 +500,14 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) ->
@parametrize
async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"):
- await async_client.providers.openai.keys.with_raw_response.delete(
+ await async_client.model_providers.openai.keys.with_raw_response.delete(
"",
)
@pytest.mark.skip()
@parametrize
async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.retrieve_agents(
+ key = await async_client.model_providers.openai.keys.retrieve_agents(
uuid="uuid",
)
assert_matches_type(KeyRetrieveAgentsResponse, key, path=["response"])
@@ -515,7 +515,7 @@ async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> No
@pytest.mark.skip()
@parametrize
async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncGradientAI) -> None:
- key = await async_client.providers.openai.keys.retrieve_agents(
+ key = await async_client.model_providers.openai.keys.retrieve_agents(
uuid="uuid",
page=0,
per_page=0,
@@ -525,7 +525,7 @@ async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncG
@pytest.mark.skip()
@parametrize
async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
- response = await async_client.providers.openai.keys.with_raw_response.retrieve_agents(
+ response = await async_client.model_providers.openai.keys.with_raw_response.retrieve_agents(
uuid="uuid",
)
@@ -537,7 +537,7 @@ async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI)
@pytest.mark.skip()
@parametrize
async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
- async with async_client.providers.openai.keys.with_streaming_response.retrieve_agents(
+ async with async_client.model_providers.openai.keys.with_streaming_response.retrieve_agents(
uuid="uuid",
) as response:
assert not response.is_closed
@@ -552,6 +552,6 @@ async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradi
@parametrize
async def test_path_params_retrieve_agents(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
- await async_client.providers.openai.keys.with_raw_response.retrieve_agents(
+ await async_client.model_providers.openai.keys.with_raw_response.retrieve_agents(
uuid="",
)
From 299fd1b29b42f6f2581150e52dcf65fc73270862 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 06:44:20 +0000
Subject: [PATCH 11/21] feat(api): update via SDK Studio
---
.stats.yml | 2 +-
README.md | 49 +++++++++++++++++++++++++++++++++++++------------
2 files changed, 38 insertions(+), 13 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index d58c3c34..4fb13307 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 67
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: a5bfbbd032355b26ddd41d659c93495b
+config_hash: fc55dd4870b7f5b1f319fffe9a0c5b74
diff --git a/README.md b/README.md
index e7715d74..7e25c206 100644
--- a/README.md
+++ b/README.md
@@ -31,10 +31,16 @@ client = GradientAI(
api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted
)
-versions = client.agents.versions.list(
- uuid="REPLACE_ME",
+completion = client.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
)
-print(versions.agent_versions)
+print(completion.id)
```
While you can provide an `api_key` keyword argument,
@@ -57,10 +63,16 @@ client = AsyncGradientAI(
async def main() -> None:
- versions = await client.agents.versions.list(
- uuid="REPLACE_ME",
+ completion = await client.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
)
- print(versions.agent_versions)
+ print(completion.id)
asyncio.run(main())
@@ -93,10 +105,16 @@ async def main() -> None:
api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted
http_client=DefaultAioHttpClient(),
) as client:
- versions = await client.agents.versions.list(
- uuid="REPLACE_ME",
+ completion = await client.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
)
- print(versions.agent_versions)
+ print(completion.id)
asyncio.run(main())
@@ -120,10 +138,17 @@ from do_gradientai import GradientAI
client = GradientAI()
-evaluation_test_case = client.agents.evaluation_test_cases.create(
- star_metric={},
+completion = client.chat.completions.create(
+ messages=[
+ {
+ "content": "string",
+ "role": "system",
+ }
+ ],
+ model="llama3-8b-instruct",
+ stream_options={},
)
-print(evaluation_test_case.star_metric)
+print(completion.stream_options)
```
## Handling errors
From 98424f4a2c7e00138fb5eecf94ca72e2ffcc1212 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 07:35:42 +0000
Subject: [PATCH 12/21] feat(api): update via SDK Studio
---
.stats.yml | 2 +-
src/do_gradientai/_client.py | 65 +++++++--
tests/conftest.py | 11 +-
tests/test_client.py | 269 ++++++++++++++++++++++++++++-------
4 files changed, 280 insertions(+), 67 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index 4fb13307..1246506f 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 67
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: fc55dd4870b7f5b1f319fffe9a0c5b74
+config_hash: dd2b5f3f77ea08c6062115a56c3367ee
diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py
index a57125ee..4dc1c952 100644
--- a/src/do_gradientai/_client.py
+++ b/src/do_gradientai/_client.py
@@ -13,6 +13,7 @@
from ._types import (
NOT_GIVEN,
Omit,
+ Headers,
Timeout,
NotGiven,
Transport,
@@ -23,7 +24,7 @@
from ._compat import cached_property
from ._version import __version__
from ._streaming import Stream as Stream, AsyncStream as AsyncStream
-from ._exceptions import APIStatusError, GradientAIError
+from ._exceptions import APIStatusError
from ._base_client import (
DEFAULT_MAX_RETRIES,
SyncAPIClient,
@@ -54,12 +55,14 @@
class GradientAI(SyncAPIClient):
# client options
- api_key: str
+ api_key: str | None
+ inference_key: str | None
def __init__(
self,
*,
api_key: str | None = None,
+ inference_key: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN,
max_retries: int = DEFAULT_MAX_RETRIES,
@@ -81,16 +84,18 @@ def __init__(
) -> None:
"""Construct a new synchronous GradientAI client instance.
- This automatically infers the `api_key` argument from the `GRADIENTAI_API_KEY` environment variable if it is not provided.
+ This automatically infers the following arguments from their corresponding environment variables if they are not provided:
+ - `api_key` from `GRADIENTAI_API_KEY`
+ - `inference_key` from `GRADIENTAI_API_KEY`
"""
if api_key is None:
api_key = os.environ.get("GRADIENTAI_API_KEY")
- if api_key is None:
- raise GradientAIError(
- "The api_key client option must be set either by passing api_key to the client or by setting the GRADIENTAI_API_KEY environment variable"
- )
self.api_key = api_key
+ if inference_key is None:
+ inference_key = os.environ.get("GRADIENTAI_API_KEY")
+ self.inference_key = inference_key
+
if base_url is None:
base_url = os.environ.get("GRADIENT_AI_BASE_URL")
self._base_url_overridden = base_url is not None
@@ -167,6 +172,8 @@ def qs(self) -> Querystring:
@override
def auth_headers(self) -> dict[str, str]:
api_key = self.api_key
+ if api_key is None:
+ return {}
return {"Authorization": f"Bearer {api_key}"}
@property
@@ -178,10 +185,22 @@ def default_headers(self) -> dict[str, str | Omit]:
**self._custom_headers,
}
+ @override
+ def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
+ if self.api_key and headers.get("Authorization"):
+ return
+ if isinstance(custom_headers.get("Authorization"), Omit):
+ return
+
+ raise TypeError(
+ '"Could not resolve authentication method. Expected the api_key to be set. Or for the `Authorization` headers to be explicitly omitted"'
+ )
+
def copy(
self,
*,
api_key: str | None = None,
+ inference_key: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
http_client: httpx.Client | None = None,
@@ -216,6 +235,7 @@ def copy(
http_client = http_client or self._client
client = self.__class__(
api_key=api_key or self.api_key,
+ inference_key=inference_key or self.inference_key,
base_url=base_url or self.base_url,
timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
http_client=http_client,
@@ -267,12 +287,14 @@ def _make_status_error(
class AsyncGradientAI(AsyncAPIClient):
# client options
- api_key: str
+ api_key: str | None
+ inference_key: str | None
def __init__(
self,
*,
api_key: str | None = None,
+ inference_key: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN,
max_retries: int = DEFAULT_MAX_RETRIES,
@@ -294,16 +316,18 @@ def __init__(
) -> None:
"""Construct a new async AsyncGradientAI client instance.
- This automatically infers the `api_key` argument from the `GRADIENTAI_API_KEY` environment variable if it is not provided.
+ This automatically infers the following arguments from their corresponding environment variables if they are not provided:
+ - `api_key` from `GRADIENTAI_API_KEY`
+ - `inference_key` from `GRADIENTAI_API_KEY`
"""
if api_key is None:
api_key = os.environ.get("GRADIENTAI_API_KEY")
- if api_key is None:
- raise GradientAIError(
- "The api_key client option must be set either by passing api_key to the client or by setting the GRADIENTAI_API_KEY environment variable"
- )
self.api_key = api_key
+ if inference_key is None:
+ inference_key = os.environ.get("GRADIENTAI_API_KEY")
+ self.inference_key = inference_key
+
if base_url is None:
base_url = os.environ.get("GRADIENT_AI_BASE_URL")
self._base_url_overridden = base_url is not None
@@ -380,6 +404,8 @@ def qs(self) -> Querystring:
@override
def auth_headers(self) -> dict[str, str]:
api_key = self.api_key
+ if api_key is None:
+ return {}
return {"Authorization": f"Bearer {api_key}"}
@property
@@ -391,10 +417,22 @@ def default_headers(self) -> dict[str, str | Omit]:
**self._custom_headers,
}
+ @override
+ def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
+ if self.api_key and headers.get("Authorization"):
+ return
+ if isinstance(custom_headers.get("Authorization"), Omit):
+ return
+
+ raise TypeError(
+ '"Could not resolve authentication method. Expected the api_key to be set. Or for the `Authorization` headers to be explicitly omitted"'
+ )
+
def copy(
self,
*,
api_key: str | None = None,
+ inference_key: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
http_client: httpx.AsyncClient | None = None,
@@ -429,6 +467,7 @@ def copy(
http_client = http_client or self._client
client = self.__class__(
api_key=api_key or self.api_key,
+ inference_key=inference_key or self.inference_key,
base_url=base_url or self.base_url,
timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
http_client=http_client,
diff --git a/tests/conftest.py b/tests/conftest.py
index daa5b955..6048de1a 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -46,6 +46,7 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None:
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
api_key = "My API Key"
+inference_key = "My Inference Key"
@pytest.fixture(scope="session")
@@ -54,7 +55,9 @@ def client(request: FixtureRequest) -> Iterator[GradientAI]:
if not isinstance(strict, bool):
raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}")
- with GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client:
+ with GradientAI(
+ base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=strict
+ ) as client:
yield client
@@ -79,6 +82,10 @@ async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncGradientAI
raise TypeError(f"Unexpected fixture parameter type {type(param)}, expected bool or dict")
async with AsyncGradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=strict, http_client=http_client
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=strict,
+ http_client=http_client,
) as client:
yield client
diff --git a/tests/test_client.py b/tests/test_client.py
index 920275ae..44dbc938 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -24,7 +24,7 @@
from do_gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError
from do_gradientai._types import Omit
from do_gradientai._models import BaseModel, FinalRequestOptions
-from do_gradientai._exceptions import APIStatusError, APITimeoutError, GradientAIError, APIResponseValidationError
+from do_gradientai._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError
from do_gradientai._base_client import (
DEFAULT_TIMEOUT,
HTTPX_DEFAULT_TIMEOUT,
@@ -38,6 +38,7 @@
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
api_key = "My API Key"
+inference_key = "My Inference Key"
def _get_params(client: BaseClient[Any, Any]) -> dict[str, str]:
@@ -59,7 +60,9 @@ def _get_open_connections(client: GradientAI | AsyncGradientAI) -> int:
class TestGradientAI:
- client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ client = GradientAI(
+ base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ )
@pytest.mark.respx(base_url=base_url)
def test_raw_response(self, respx_mock: MockRouter) -> None:
@@ -89,6 +92,10 @@ def test_copy(self) -> None:
assert copied.api_key == "another My API Key"
assert self.client.api_key == "My API Key"
+ copied = self.client.copy(inference_key="another My Inference Key")
+ assert copied.inference_key == "another My Inference Key"
+ assert self.client.inference_key == "My Inference Key"
+
def test_copy_default_options(self) -> None:
# options that have a default are overridden correctly
copied = self.client.copy(max_retries=7)
@@ -107,7 +114,11 @@ def test_copy_default_options(self) -> None:
def test_copy_default_headers(self) -> None:
client = GradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ default_headers={"X-Foo": "bar"},
)
assert client.default_headers["X-Foo"] == "bar"
@@ -141,7 +152,11 @@ def test_copy_default_headers(self) -> None:
def test_copy_default_query(self) -> None:
client = GradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"}
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ default_query={"foo": "bar"},
)
assert _get_params(client)["foo"] == "bar"
@@ -267,7 +282,11 @@ def test_request_timeout(self) -> None:
def test_client_timeout_option(self) -> None:
client = GradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, timeout=httpx.Timeout(0)
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ timeout=httpx.Timeout(0),
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
@@ -278,7 +297,11 @@ def test_http_client_timeout_option(self) -> None:
# custom timeout given to the httpx client should be used
with httpx.Client(timeout=None) as http_client:
client = GradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
@@ -288,7 +311,11 @@ def test_http_client_timeout_option(self) -> None:
# no timeout given to the httpx client should not use the httpx default
with httpx.Client() as http_client:
client = GradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
@@ -298,7 +325,11 @@ def test_http_client_timeout_option(self) -> None:
# explicitly passing the default timeout currently results in it being ignored
with httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client:
client = GradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
@@ -311,13 +342,18 @@ async def test_invalid_http_client(self) -> None:
GradientAI(
base_url=base_url,
api_key=api_key,
+ inference_key=inference_key,
_strict_response_validation=True,
http_client=cast(Any, http_client),
)
def test_default_headers_option(self) -> None:
client = GradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ default_headers={"X-Foo": "bar"},
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("x-foo") == "bar"
@@ -326,6 +362,7 @@ def test_default_headers_option(self) -> None:
client2 = GradientAI(
base_url=base_url,
api_key=api_key,
+ inference_key=inference_key,
_strict_response_validation=True,
default_headers={
"X-Foo": "stainless",
@@ -337,18 +374,35 @@ def test_default_headers_option(self) -> None:
assert request.headers.get("x-stainless-lang") == "my-overriding-header"
def test_validate_headers(self) -> None:
- client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ client = GradientAI(
+ base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ )
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("Authorization") == f"Bearer {api_key}"
- with pytest.raises(GradientAIError):
- with update_env(**{"GRADIENTAI_API_KEY": Omit()}):
- client2 = GradientAI(base_url=base_url, api_key=None, _strict_response_validation=True)
- _ = client2
+ with update_env(**{"GRADIENTAI_API_KEY": Omit()}):
+ client2 = GradientAI(
+ base_url=base_url, api_key=None, inference_key=inference_key, _strict_response_validation=True
+ )
+
+ with pytest.raises(
+ TypeError,
+ match="Could not resolve authentication method. Expected the api_key to be set. Or for the `Authorization` headers to be explicitly omitted",
+ ):
+ client2._build_request(FinalRequestOptions(method="get", url="/foo"))
+
+ request2 = client2._build_request(
+ FinalRequestOptions(method="get", url="/foo", headers={"Authorization": Omit()})
+ )
+ assert request2.headers.get("Authorization") is None
def test_default_query_option(self) -> None:
client = GradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"}
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ default_query={"query_param": "bar"},
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
url = httpx.URL(request.url)
@@ -548,7 +602,12 @@ class Model(BaseModel):
assert response.foo == 2
def test_base_url_setter(self) -> None:
- client = GradientAI(base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True)
+ client = GradientAI(
+ base_url="https://example.com/from_init",
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ )
assert client.base_url == "https://example.com/from_init/"
client.base_url = "https://example.com/from_setter" # type: ignore[assignment]
@@ -557,18 +616,22 @@ def test_base_url_setter(self) -> None:
def test_base_url_env(self) -> None:
with update_env(GRADIENT_AI_BASE_URL="http://localhost:5000/from/env"):
- client = GradientAI(api_key=api_key, _strict_response_validation=True)
+ client = GradientAI(api_key=api_key, inference_key=inference_key, _strict_response_validation=True)
assert client.base_url == "http://localhost:5000/from/env/"
@pytest.mark.parametrize(
"client",
[
GradientAI(
- base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
+ base_url="http://localhost:5000/custom/path/",
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
),
GradientAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
+ inference_key=inference_key,
_strict_response_validation=True,
http_client=httpx.Client(),
),
@@ -589,11 +652,15 @@ def test_base_url_trailing_slash(self, client: GradientAI) -> None:
"client",
[
GradientAI(
- base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
+ base_url="http://localhost:5000/custom/path/",
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
),
GradientAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
+ inference_key=inference_key,
_strict_response_validation=True,
http_client=httpx.Client(),
),
@@ -614,11 +681,15 @@ def test_base_url_no_trailing_slash(self, client: GradientAI) -> None:
"client",
[
GradientAI(
- base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
+ base_url="http://localhost:5000/custom/path/",
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
),
GradientAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
+ inference_key=inference_key,
_strict_response_validation=True,
http_client=httpx.Client(),
),
@@ -636,7 +707,9 @@ def test_absolute_request_url(self, client: GradientAI) -> None:
assert request.url == "https://myapi.com/foo"
def test_copied_client_does_not_close_http(self) -> None:
- client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ client = GradientAI(
+ base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ )
assert not client.is_closed()
copied = client.copy()
@@ -647,7 +720,9 @@ def test_copied_client_does_not_close_http(self) -> None:
assert not client.is_closed()
def test_client_context_manager(self) -> None:
- client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ client = GradientAI(
+ base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ )
with client as c2:
assert c2 is client
assert not c2.is_closed()
@@ -669,7 +744,11 @@ class Model(BaseModel):
def test_client_max_retries_validation(self) -> None:
with pytest.raises(TypeError, match=r"max_retries cannot be None"):
GradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, max_retries=cast(Any, None)
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ max_retries=cast(Any, None),
)
@pytest.mark.respx(base_url=base_url)
@@ -679,12 +758,16 @@ class Model(BaseModel):
respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format"))
- strict_client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ strict_client = GradientAI(
+ base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ )
with pytest.raises(APIResponseValidationError):
strict_client.get("/foo", cast_to=Model)
- client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=False)
+ client = GradientAI(
+ base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=False
+ )
response = client.get("/foo", cast_to=Model)
assert isinstance(response, str) # type: ignore[unreachable]
@@ -712,7 +795,9 @@ class Model(BaseModel):
)
@mock.patch("time.time", mock.MagicMock(return_value=1696004797))
def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None:
- client = GradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ client = GradientAI(
+ base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ )
headers = httpx.Headers({"retry-after": retry_after})
options = FinalRequestOptions(method="get", url="/foo", max_retries=3)
@@ -870,7 +955,9 @@ def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None:
class TestAsyncGradientAI:
- client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ client = AsyncGradientAI(
+ base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ )
@pytest.mark.respx(base_url=base_url)
@pytest.mark.asyncio
@@ -902,6 +989,10 @@ def test_copy(self) -> None:
assert copied.api_key == "another My API Key"
assert self.client.api_key == "My API Key"
+ copied = self.client.copy(inference_key="another My Inference Key")
+ assert copied.inference_key == "another My Inference Key"
+ assert self.client.inference_key == "My Inference Key"
+
def test_copy_default_options(self) -> None:
# options that have a default are overridden correctly
copied = self.client.copy(max_retries=7)
@@ -920,7 +1011,11 @@ def test_copy_default_options(self) -> None:
def test_copy_default_headers(self) -> None:
client = AsyncGradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ default_headers={"X-Foo": "bar"},
)
assert client.default_headers["X-Foo"] == "bar"
@@ -954,7 +1049,11 @@ def test_copy_default_headers(self) -> None:
def test_copy_default_query(self) -> None:
client = AsyncGradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"}
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ default_query={"foo": "bar"},
)
assert _get_params(client)["foo"] == "bar"
@@ -1080,7 +1179,11 @@ async def test_request_timeout(self) -> None:
async def test_client_timeout_option(self) -> None:
client = AsyncGradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, timeout=httpx.Timeout(0)
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ timeout=httpx.Timeout(0),
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
@@ -1091,7 +1194,11 @@ async def test_http_client_timeout_option(self) -> None:
# custom timeout given to the httpx client should be used
async with httpx.AsyncClient(timeout=None) as http_client:
client = AsyncGradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
@@ -1101,7 +1208,11 @@ async def test_http_client_timeout_option(self) -> None:
# no timeout given to the httpx client should not use the httpx default
async with httpx.AsyncClient() as http_client:
client = AsyncGradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
@@ -1111,7 +1222,11 @@ async def test_http_client_timeout_option(self) -> None:
# explicitly passing the default timeout currently results in it being ignored
async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client:
client = AsyncGradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ http_client=http_client,
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
@@ -1124,13 +1239,18 @@ def test_invalid_http_client(self) -> None:
AsyncGradientAI(
base_url=base_url,
api_key=api_key,
+ inference_key=inference_key,
_strict_response_validation=True,
http_client=cast(Any, http_client),
)
def test_default_headers_option(self) -> None:
client = AsyncGradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ default_headers={"X-Foo": "bar"},
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("x-foo") == "bar"
@@ -1139,6 +1259,7 @@ def test_default_headers_option(self) -> None:
client2 = AsyncGradientAI(
base_url=base_url,
api_key=api_key,
+ inference_key=inference_key,
_strict_response_validation=True,
default_headers={
"X-Foo": "stainless",
@@ -1150,18 +1271,35 @@ def test_default_headers_option(self) -> None:
assert request.headers.get("x-stainless-lang") == "my-overriding-header"
def test_validate_headers(self) -> None:
- client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ client = AsyncGradientAI(
+ base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ )
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("Authorization") == f"Bearer {api_key}"
- with pytest.raises(GradientAIError):
- with update_env(**{"GRADIENTAI_API_KEY": Omit()}):
- client2 = AsyncGradientAI(base_url=base_url, api_key=None, _strict_response_validation=True)
- _ = client2
+ with update_env(**{"GRADIENTAI_API_KEY": Omit()}):
+ client2 = AsyncGradientAI(
+ base_url=base_url, api_key=None, inference_key=inference_key, _strict_response_validation=True
+ )
+
+ with pytest.raises(
+ TypeError,
+ match="Could not resolve authentication method. Expected the api_key to be set. Or for the `Authorization` headers to be explicitly omitted",
+ ):
+ client2._build_request(FinalRequestOptions(method="get", url="/foo"))
+
+ request2 = client2._build_request(
+ FinalRequestOptions(method="get", url="/foo", headers={"Authorization": Omit()})
+ )
+ assert request2.headers.get("Authorization") is None
def test_default_query_option(self) -> None:
client = AsyncGradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"}
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ default_query={"query_param": "bar"},
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
url = httpx.URL(request.url)
@@ -1362,7 +1500,10 @@ class Model(BaseModel):
def test_base_url_setter(self) -> None:
client = AsyncGradientAI(
- base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True
+ base_url="https://example.com/from_init",
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
)
assert client.base_url == "https://example.com/from_init/"
@@ -1372,18 +1513,22 @@ def test_base_url_setter(self) -> None:
def test_base_url_env(self) -> None:
with update_env(GRADIENT_AI_BASE_URL="http://localhost:5000/from/env"):
- client = AsyncGradientAI(api_key=api_key, _strict_response_validation=True)
+ client = AsyncGradientAI(api_key=api_key, inference_key=inference_key, _strict_response_validation=True)
assert client.base_url == "http://localhost:5000/from/env/"
@pytest.mark.parametrize(
"client",
[
AsyncGradientAI(
- base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
+ base_url="http://localhost:5000/custom/path/",
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
),
AsyncGradientAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
+ inference_key=inference_key,
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
),
@@ -1404,11 +1549,15 @@ def test_base_url_trailing_slash(self, client: AsyncGradientAI) -> None:
"client",
[
AsyncGradientAI(
- base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
+ base_url="http://localhost:5000/custom/path/",
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
),
AsyncGradientAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
+ inference_key=inference_key,
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
),
@@ -1429,11 +1578,15 @@ def test_base_url_no_trailing_slash(self, client: AsyncGradientAI) -> None:
"client",
[
AsyncGradientAI(
- base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
+ base_url="http://localhost:5000/custom/path/",
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
),
AsyncGradientAI(
base_url="http://localhost:5000/custom/path/",
api_key=api_key,
+ inference_key=inference_key,
_strict_response_validation=True,
http_client=httpx.AsyncClient(),
),
@@ -1451,7 +1604,9 @@ def test_absolute_request_url(self, client: AsyncGradientAI) -> None:
assert request.url == "https://myapi.com/foo"
async def test_copied_client_does_not_close_http(self) -> None:
- client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ client = AsyncGradientAI(
+ base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ )
assert not client.is_closed()
copied = client.copy()
@@ -1463,7 +1618,9 @@ async def test_copied_client_does_not_close_http(self) -> None:
assert not client.is_closed()
async def test_client_context_manager(self) -> None:
- client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ client = AsyncGradientAI(
+ base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ )
async with client as c2:
assert c2 is client
assert not c2.is_closed()
@@ -1486,7 +1643,11 @@ class Model(BaseModel):
async def test_client_max_retries_validation(self) -> None:
with pytest.raises(TypeError, match=r"max_retries cannot be None"):
AsyncGradientAI(
- base_url=base_url, api_key=api_key, _strict_response_validation=True, max_retries=cast(Any, None)
+ base_url=base_url,
+ api_key=api_key,
+ inference_key=inference_key,
+ _strict_response_validation=True,
+ max_retries=cast(Any, None),
)
@pytest.mark.respx(base_url=base_url)
@@ -1497,12 +1658,16 @@ class Model(BaseModel):
respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format"))
- strict_client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ strict_client = AsyncGradientAI(
+ base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ )
with pytest.raises(APIResponseValidationError):
await strict_client.get("/foo", cast_to=Model)
- client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=False)
+ client = AsyncGradientAI(
+ base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=False
+ )
response = await client.get("/foo", cast_to=Model)
assert isinstance(response, str) # type: ignore[unreachable]
@@ -1531,7 +1696,9 @@ class Model(BaseModel):
@mock.patch("time.time", mock.MagicMock(return_value=1696004797))
@pytest.mark.asyncio
async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None:
- client = AsyncGradientAI(base_url=base_url, api_key=api_key, _strict_response_validation=True)
+ client = AsyncGradientAI(
+ base_url=base_url, api_key=api_key, inference_key=inference_key, _strict_response_validation=True
+ )
headers = httpx.Headers({"retry-after": retry_after})
options = FinalRequestOptions(method="get", url="/foo", max_retries=3)
From 1ae76f78ce9e74f8fd555e3497299127e9aa6889 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 07:36:05 +0000
Subject: [PATCH 13/21] feat(api): update via SDK Studio
---
.stats.yml | 2 +-
src/do_gradientai/_client.py | 8 ++++----
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/.stats.yml b/.stats.yml
index 1246506f..d96a651b 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 67
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: dd2b5f3f77ea08c6062115a56c3367ee
+config_hash: a1224bdbf22a97483c0e2d813b24423c
diff --git a/src/do_gradientai/_client.py b/src/do_gradientai/_client.py
index 4dc1c952..0020ed16 100644
--- a/src/do_gradientai/_client.py
+++ b/src/do_gradientai/_client.py
@@ -86,14 +86,14 @@ def __init__(
This automatically infers the following arguments from their corresponding environment variables if they are not provided:
- `api_key` from `GRADIENTAI_API_KEY`
- - `inference_key` from `GRADIENTAI_API_KEY`
+ - `inference_key` from `GRADIENTAI_INFERENCE_KEY`
"""
if api_key is None:
api_key = os.environ.get("GRADIENTAI_API_KEY")
self.api_key = api_key
if inference_key is None:
- inference_key = os.environ.get("GRADIENTAI_API_KEY")
+ inference_key = os.environ.get("GRADIENTAI_INFERENCE_KEY")
self.inference_key = inference_key
if base_url is None:
@@ -318,14 +318,14 @@ def __init__(
This automatically infers the following arguments from their corresponding environment variables if they are not provided:
- `api_key` from `GRADIENTAI_API_KEY`
- - `inference_key` from `GRADIENTAI_API_KEY`
+ - `inference_key` from `GRADIENTAI_INFERENCE_KEY`
"""
if api_key is None:
api_key = os.environ.get("GRADIENTAI_API_KEY")
self.api_key = api_key
if inference_key is None:
- inference_key = os.environ.get("GRADIENTAI_API_KEY")
+ inference_key = os.environ.get("GRADIENTAI_INFERENCE_KEY")
self.inference_key = inference_key
if base_url is None:
From 66d146a7719a47e692e063a0c0197af93e66e385 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 07:36:29 +0000
Subject: [PATCH 14/21] codegen metadata
---
.stats.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.stats.yml b/.stats.yml
index d96a651b..a3b45817 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 67
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: a1224bdbf22a97483c0e2d813b24423c
+config_hash: 318e79d212eb460dc120bed99c778b1e
From 45c4a68a2db8a83d34a2558bc64df2509d5131ea Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 07:36:52 +0000
Subject: [PATCH 15/21] codegen metadata
---
.stats.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.stats.yml b/.stats.yml
index a3b45817..24272762 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 67
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: 318e79d212eb460dc120bed99c778b1e
+config_hash: 7fec1a24eb493bd03fc0375fbbd5e5a7
From 8d87001b51de17dd1a36419c0e926cef119f20b8 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 14:00:26 +0000
Subject: [PATCH 16/21] feat(api): define api links and meta as shared models
---
.stats.yml | 2 +-
api.md | 8 +++++++-
src/do_gradientai/types/__init__.py | 1 +
src/do_gradientai/types/agent_list_response.py | 4 ++--
src/do_gradientai/types/agents/__init__.py | 2 --
src/do_gradientai/types/agents/api_key_list_response.py | 4 ++--
src/do_gradientai/types/agents/version_list_response.py | 4 ++--
.../types/inference/api_key_list_response.py | 4 ++--
src/do_gradientai/types/knowledge_base_list_response.py | 4 ++--
.../types/knowledge_bases/data_source_list_response.py | 4 ++--
.../types/knowledge_bases/indexing_job_list_response.py | 4 ++--
src/do_gradientai/types/model_list_response.py | 4 ++--
.../model_providers/anthropic/key_list_agents_response.py | 4 ++--
.../types/model_providers/anthropic/key_list_response.py | 4 ++--
.../types/model_providers/openai/key_list_response.py | 4 ++--
.../openai/key_retrieve_agents_response.py | 4 ++--
src/do_gradientai/types/shared/__init__.py | 4 ++++
src/do_gradientai/types/{agents => shared}/api_links.py | 0
src/do_gradientai/types/{agents => shared}/api_meta.py | 0
19 files changed, 37 insertions(+), 28 deletions(-)
create mode 100644 src/do_gradientai/types/shared/__init__.py
rename src/do_gradientai/types/{agents => shared}/api_links.py (100%)
rename src/do_gradientai/types/{agents => shared}/api_meta.py (100%)
diff --git a/.stats.yml b/.stats.yml
index 24272762..876bab8e 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 67
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: 7fec1a24eb493bd03fc0375fbbd5e5a7
+config_hash: 70cce9f06a7f98292ef13598418ed48d
diff --git a/api.md b/api.md
index 686761f9..3bce144e 100644
--- a/api.md
+++ b/api.md
@@ -1,3 +1,9 @@
+# Shared Types
+
+```python
+from do_gradientai.types import APILinks, APIMeta
+```
+
# Agents
Types:
@@ -149,7 +155,7 @@ Methods:
Types:
```python
-from do_gradientai.types.agents import APILinks, APIMeta, VersionUpdateResponse, VersionListResponse
+from do_gradientai.types.agents import VersionUpdateResponse, VersionListResponse
```
Methods:
diff --git a/src/do_gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py
index dde7f848..23cf1802 100644
--- a/src/do_gradientai/types/__init__.py
+++ b/src/do_gradientai/types/__init__.py
@@ -2,6 +2,7 @@
from __future__ import annotations
+from .shared import APIMeta as APIMeta, APILinks as APILinks
from .api_agent import APIAgent as APIAgent
from .api_model import APIModel as APIModel
from .api_agreement import APIAgreement as APIAgreement
diff --git a/src/do_gradientai/types/agent_list_response.py b/src/do_gradientai/types/agent_list_response.py
index 97c0f0d5..397d9fd2 100644
--- a/src/do_gradientai/types/agent_list_response.py
+++ b/src/do_gradientai/types/agent_list_response.py
@@ -5,9 +5,9 @@
from typing_extensions import Literal
from .._models import BaseModel
-from .agents.api_meta import APIMeta
from .api_agent_model import APIAgentModel
-from .agents.api_links import APILinks
+from .shared.api_meta import APIMeta
+from .shared.api_links import APILinks
from .api_knowledge_base import APIKnowledgeBase
from .api_retrieval_method import APIRetrievalMethod
from .api_deployment_visibility import APIDeploymentVisibility
diff --git a/src/do_gradientai/types/agents/__init__.py b/src/do_gradientai/types/agents/__init__.py
index 1dd18511..0060e12c 100644
--- a/src/do_gradientai/types/agents/__init__.py
+++ b/src/do_gradientai/types/agents/__init__.py
@@ -2,8 +2,6 @@
from __future__ import annotations
-from .api_meta import APIMeta as APIMeta
-from .api_links import APILinks as APILinks
from .api_star_metric import APIStarMetric as APIStarMetric
from .route_add_params import RouteAddParams as RouteAddParams
from .api_evaluation_run import APIEvaluationRun as APIEvaluationRun
diff --git a/src/do_gradientai/types/agents/api_key_list_response.py b/src/do_gradientai/types/agents/api_key_list_response.py
index eff98649..aedb88ca 100644
--- a/src/do_gradientai/types/agents/api_key_list_response.py
+++ b/src/do_gradientai/types/agents/api_key_list_response.py
@@ -2,9 +2,9 @@
from typing import List, Optional
-from .api_meta import APIMeta
from ..._models import BaseModel
-from .api_links import APILinks
+from ..shared.api_meta import APIMeta
+from ..shared.api_links import APILinks
from ..api_agent_api_key_info import APIAgentAPIKeyInfo
__all__ = ["APIKeyListResponse"]
diff --git a/src/do_gradientai/types/agents/version_list_response.py b/src/do_gradientai/types/agents/version_list_response.py
index 1f3c3d69..af25150e 100644
--- a/src/do_gradientai/types/agents/version_list_response.py
+++ b/src/do_gradientai/types/agents/version_list_response.py
@@ -5,9 +5,9 @@
from pydantic import Field as FieldInfo
-from .api_meta import APIMeta
from ..._models import BaseModel
-from .api_links import APILinks
+from ..shared.api_meta import APIMeta
+from ..shared.api_links import APILinks
from ..api_retrieval_method import APIRetrievalMethod
__all__ = [
diff --git a/src/do_gradientai/types/inference/api_key_list_response.py b/src/do_gradientai/types/inference/api_key_list_response.py
index 535e2f96..3e937950 100644
--- a/src/do_gradientai/types/inference/api_key_list_response.py
+++ b/src/do_gradientai/types/inference/api_key_list_response.py
@@ -3,8 +3,8 @@
from typing import List, Optional
from ..._models import BaseModel
-from ..agents.api_meta import APIMeta
-from ..agents.api_links import APILinks
+from ..shared.api_meta import APIMeta
+from ..shared.api_links import APILinks
from .api_model_api_key_info import APIModelAPIKeyInfo
__all__ = ["APIKeyListResponse"]
diff --git a/src/do_gradientai/types/knowledge_base_list_response.py b/src/do_gradientai/types/knowledge_base_list_response.py
index 09ca1ad3..e8998b51 100644
--- a/src/do_gradientai/types/knowledge_base_list_response.py
+++ b/src/do_gradientai/types/knowledge_base_list_response.py
@@ -3,8 +3,8 @@
from typing import List, Optional
from .._models import BaseModel
-from .agents.api_meta import APIMeta
-from .agents.api_links import APILinks
+from .shared.api_meta import APIMeta
+from .shared.api_links import APILinks
from .api_knowledge_base import APIKnowledgeBase
__all__ = ["KnowledgeBaseListResponse"]
diff --git a/src/do_gradientai/types/knowledge_bases/data_source_list_response.py b/src/do_gradientai/types/knowledge_bases/data_source_list_response.py
index 78246ce1..2e5fc517 100644
--- a/src/do_gradientai/types/knowledge_bases/data_source_list_response.py
+++ b/src/do_gradientai/types/knowledge_bases/data_source_list_response.py
@@ -3,8 +3,8 @@
from typing import List, Optional
from ..._models import BaseModel
-from ..agents.api_meta import APIMeta
-from ..agents.api_links import APILinks
+from ..shared.api_meta import APIMeta
+from ..shared.api_links import APILinks
from .api_knowledge_base_data_source import APIKnowledgeBaseDataSource
__all__ = ["DataSourceListResponse"]
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py
index 4784c1a1..deea4562 100644
--- a/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py
+++ b/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py
@@ -3,9 +3,9 @@
from typing import List, Optional
from ..._models import BaseModel
-from ..agents.api_meta import APIMeta
+from ..shared.api_meta import APIMeta
from .api_indexing_job import APIIndexingJob
-from ..agents.api_links import APILinks
+from ..shared.api_links import APILinks
__all__ = ["IndexingJobListResponse"]
diff --git a/src/do_gradientai/types/model_list_response.py b/src/do_gradientai/types/model_list_response.py
index e6f5fad5..47651759 100644
--- a/src/do_gradientai/types/model_list_response.py
+++ b/src/do_gradientai/types/model_list_response.py
@@ -4,8 +4,8 @@
from .._models import BaseModel
from .api_model import APIModel
-from .agents.api_meta import APIMeta
-from .agents.api_links import APILinks
+from .shared.api_meta import APIMeta
+from .shared.api_links import APILinks
__all__ = ["ModelListResponse"]
diff --git a/src/do_gradientai/types/model_providers/anthropic/key_list_agents_response.py b/src/do_gradientai/types/model_providers/anthropic/key_list_agents_response.py
index ba6ca946..c9e74cf7 100644
--- a/src/do_gradientai/types/model_providers/anthropic/key_list_agents_response.py
+++ b/src/do_gradientai/types/model_providers/anthropic/key_list_agents_response.py
@@ -5,8 +5,8 @@
from typing import List, Optional
from ...._models import BaseModel
-from ...agents.api_meta import APIMeta
-from ...agents.api_links import APILinks
+from ...shared.api_meta import APIMeta
+from ...shared.api_links import APILinks
__all__ = ["KeyListAgentsResponse"]
diff --git a/src/do_gradientai/types/model_providers/anthropic/key_list_response.py b/src/do_gradientai/types/model_providers/anthropic/key_list_response.py
index d0b84e96..e3e3e5ef 100644
--- a/src/do_gradientai/types/model_providers/anthropic/key_list_response.py
+++ b/src/do_gradientai/types/model_providers/anthropic/key_list_response.py
@@ -3,8 +3,8 @@
from typing import List, Optional
from ...._models import BaseModel
-from ...agents.api_meta import APIMeta
-from ...agents.api_links import APILinks
+from ...shared.api_meta import APIMeta
+from ...shared.api_links import APILinks
from ...api_anthropic_api_key_info import APIAnthropicAPIKeyInfo
__all__ = ["KeyListResponse"]
diff --git a/src/do_gradientai/types/model_providers/openai/key_list_response.py b/src/do_gradientai/types/model_providers/openai/key_list_response.py
index c263cba3..362b5dd6 100644
--- a/src/do_gradientai/types/model_providers/openai/key_list_response.py
+++ b/src/do_gradientai/types/model_providers/openai/key_list_response.py
@@ -3,8 +3,8 @@
from typing import List, Optional
from ...._models import BaseModel
-from ...agents.api_meta import APIMeta
-from ...agents.api_links import APILinks
+from ...shared.api_meta import APIMeta
+from ...shared.api_links import APILinks
from ...api_openai_api_key_info import APIOpenAIAPIKeyInfo
__all__ = ["KeyListResponse"]
diff --git a/src/do_gradientai/types/model_providers/openai/key_retrieve_agents_response.py b/src/do_gradientai/types/model_providers/openai/key_retrieve_agents_response.py
index f42edea6..56808bac 100644
--- a/src/do_gradientai/types/model_providers/openai/key_retrieve_agents_response.py
+++ b/src/do_gradientai/types/model_providers/openai/key_retrieve_agents_response.py
@@ -5,8 +5,8 @@
from typing import List, Optional
from ...._models import BaseModel
-from ...agents.api_meta import APIMeta
-from ...agents.api_links import APILinks
+from ...shared.api_meta import APIMeta
+from ...shared.api_links import APILinks
__all__ = ["KeyRetrieveAgentsResponse"]
diff --git a/src/do_gradientai/types/shared/__init__.py b/src/do_gradientai/types/shared/__init__.py
new file mode 100644
index 00000000..5f02d62f
--- /dev/null
+++ b/src/do_gradientai/types/shared/__init__.py
@@ -0,0 +1,4 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .api_meta import APIMeta as APIMeta
+from .api_links import APILinks as APILinks
diff --git a/src/do_gradientai/types/agents/api_links.py b/src/do_gradientai/types/shared/api_links.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_links.py
rename to src/do_gradientai/types/shared/api_links.py
diff --git a/src/do_gradientai/types/agents/api_meta.py b/src/do_gradientai/types/shared/api_meta.py
similarity index 100%
rename from src/do_gradientai/types/agents/api_meta.py
rename to src/do_gradientai/types/shared/api_meta.py
From e92c54b05f1025b6173945524724143fdafc7728 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 25 Jun 2025 19:14:35 +0000
Subject: [PATCH 17/21] feat(api): update OpenAI spec and add endpoints/models
---
.stats.yml | 8 +-
api.md | 52 +-
src/do_gradientai/resources/agents/agents.py | 16 +-
.../agents/evaluation_metrics/__init__.py | 33 +
.../evaluation_metrics.py | 44 +-
.../evaluation_metrics/workspaces/__init__.py | 33 +
.../evaluation_metrics/workspaces/agents.py | 324 +++++++++
.../workspaces/workspaces.py | 654 ++++++++++++++++++
.../resources/agents/evaluation_runs.py | 105 ++-
.../resources/agents/evaluation_test_cases.py | 25 +-
src/do_gradientai/resources/models.py | 182 +++--
src/do_gradientai/types/__init__.py | 3 +-
src/do_gradientai/types/agents/__init__.py | 4 +
.../types/agents/api_evaluation_run.py | 12 +
.../agents/evaluation_metrics/__init__.py | 14 +
.../workspace_create_params.py | 16 +
.../workspace_create_response.py | 16 +
.../workspace_delete_response.py | 11 +
...ace_list_evaluation_test_cases_response.py | 12 +
.../workspace_list_response.py | 16 +
.../workspace_retrieve_response.py | 16 +
.../workspace_update_params.py | 18 +
.../workspace_update_response.py | 16 +
.../evaluation_metrics/workspaces/__init__.py | 8 +
.../workspaces/agent_list_params.py | 26 +
.../workspaces/agent_list_response.py | 22 +
.../workspaces/agent_move_params.py | 16 +
.../workspaces/agent_move_response.py | 16 +
.../agents/evaluation_run_create_params.py | 5 +-
.../agents/evaluation_run_create_response.py | 4 +-
...valuation_run_retrieve_results_response.py | 12 +
.../evaluation_test_case_retrieve_params.py | 12 +
src/do_gradientai/types/api_model.py | 32 -
.../types/knowledge_bases/__init__.py | 1 +
.../api_indexed_data_source.py | 48 ++
.../types/knowledge_bases/api_indexing_job.py | 12 +
.../api_knowledge_base_data_source.py | 3 +
...xing_job_retrieve_data_sources_response.py | 46 +-
src/do_gradientai/types/model.py | 21 +
src/do_gradientai/types/model_list_params.py | 42 --
.../types/model_list_response.py | 13 +-
.../agents/evaluation_metrics/__init__.py | 1 +
.../evaluation_metrics/test_workspaces.py | 521 ++++++++++++++
.../evaluation_metrics/workspaces/__init__.py | 1 +
.../workspaces/test_agents.py | 239 +++++++
.../agents/test_evaluation_runs.py | 97 ++-
.../agents/test_evaluation_test_cases.py | 34 +-
tests/api_resources/test_models.py | 100 ++-
48 files changed, 2667 insertions(+), 295 deletions(-)
create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/__init__.py
rename src/do_gradientai/resources/agents/{ => evaluation_metrics}/evaluation_metrics.py (77%)
create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py
create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py
create mode 100644 src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/__init__.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py
create mode 100644 src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py
create mode 100644 src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py
delete mode 100644 src/do_gradientai/types/api_model.py
create mode 100644 src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py
create mode 100644 src/do_gradientai/types/model.py
delete mode 100644 src/do_gradientai/types/model_list_params.py
create mode 100644 tests/api_resources/agents/evaluation_metrics/__init__.py
create mode 100644 tests/api_resources/agents/evaluation_metrics/test_workspaces.py
create mode 100644 tests/api_resources/agents/evaluation_metrics/workspaces/__init__.py
create mode 100644 tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py
diff --git a/.stats.yml b/.stats.yml
index 876bab8e..291ef184 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 67
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e40feaac59c85aace6aa42d2749b20e0955dbbae58b06c3a650bc03adafcd7b5.yml
-openapi_spec_hash: 825c1a4816938e9f594b7a8c06692667
-config_hash: 70cce9f06a7f98292ef13598418ed48d
+configured_endpoints: 77
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-e8b3cbc80e18e4f7f277010349f25e1319156704f359911dc464cc21a0d077a6.yml
+openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208
+config_hash: ecf128ea21a8fead9dabb9609c4dbce8
diff --git a/api.md b/api.md
index 3bce144e..78a81061 100644
--- a/api.md
+++ b/api.md
@@ -68,7 +68,47 @@ from do_gradientai.types.agents import EvaluationMetricListResponse
Methods:
-- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse
+- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse
+
+### Workspaces
+
+Types:
+
+```python
+from do_gradientai.types.agents.evaluation_metrics import (
+ WorkspaceCreateResponse,
+ WorkspaceRetrieveResponse,
+ WorkspaceUpdateResponse,
+ WorkspaceListResponse,
+ WorkspaceDeleteResponse,
+ WorkspaceListEvaluationTestCasesResponse,
+)
+```
+
+Methods:
+
+- client.agents.evaluation_metrics.workspaces.create(\*\*params) -> WorkspaceCreateResponse
+- client.agents.evaluation_metrics.workspaces.retrieve(workspace_uuid) -> WorkspaceRetrieveResponse
+- client.agents.evaluation_metrics.workspaces.update(path_workspace_uuid, \*\*params) -> WorkspaceUpdateResponse
+- client.agents.evaluation_metrics.workspaces.list() -> WorkspaceListResponse
+- client.agents.evaluation_metrics.workspaces.delete(workspace_uuid) -> WorkspaceDeleteResponse
+- client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(workspace_uuid) -> WorkspaceListEvaluationTestCasesResponse
+
+#### Agents
+
+Types:
+
+```python
+from do_gradientai.types.agents.evaluation_metrics.workspaces import (
+ AgentListResponse,
+ AgentMoveResponse,
+)
+```
+
+Methods:
+
+- client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse
+- client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse
## EvaluationRuns
@@ -83,6 +123,7 @@ from do_gradientai.types.agents import (
EvaluationRunCreateResponse,
EvaluationRunRetrieveResponse,
EvaluationRunListResultsResponse,
+ EvaluationRunRetrieveResultsResponse,
)
```
@@ -91,6 +132,7 @@ Methods:
- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse
- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse
- client.agents.evaluation_runs.list_results(evaluation_run_uuid) -> EvaluationRunListResultsResponse
+- client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse
## EvaluationTestCases
@@ -111,7 +153,7 @@ from do_gradientai.types.agents import (
Methods:
- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse
-- client.agents.evaluation_test_cases.retrieve(test_case_uuid) -> EvaluationTestCaseRetrieveResponse
+- client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse
- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse
- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse
- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse
@@ -315,6 +357,7 @@ Types:
```python
from do_gradientai.types.knowledge_bases import (
+ APIIndexedDataSource,
APIIndexingJob,
IndexingJobCreateResponse,
IndexingJobRetrieveResponse,
@@ -376,9 +419,10 @@ Methods:
Types:
```python
-from do_gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelListResponse
+from do_gradientai.types import APIAgreement, APIModel, APIModelVersion, Model, ModelListResponse
```
Methods:
-- client.models.list(\*\*params) -> ModelListResponse
+- client.models.retrieve(model) -> Model
+- client.models.list() -> ModelListResponse
diff --git a/src/do_gradientai/resources/agents/agents.py b/src/do_gradientai/resources/agents/agents.py
index 5762139d..0a6e183c 100644
--- a/src/do_gradientai/resources/agents/agents.py
+++ b/src/do_gradientai/resources/agents/agents.py
@@ -73,14 +73,6 @@
KnowledgeBasesResourceWithStreamingResponse,
AsyncKnowledgeBasesResourceWithStreamingResponse,
)
-from .evaluation_metrics import (
- EvaluationMetricsResource,
- AsyncEvaluationMetricsResource,
- EvaluationMetricsResourceWithRawResponse,
- AsyncEvaluationMetricsResourceWithRawResponse,
- EvaluationMetricsResourceWithStreamingResponse,
- AsyncEvaluationMetricsResourceWithStreamingResponse,
-)
from .evaluation_datasets import (
EvaluationDatasetsResource,
AsyncEvaluationDatasetsResource,
@@ -105,6 +97,14 @@
from ...types.agent_retrieve_response import AgentRetrieveResponse
from ...types.api_deployment_visibility import APIDeploymentVisibility
from ...types.agent_update_status_response import AgentUpdateStatusResponse
+from .evaluation_metrics.evaluation_metrics import (
+ EvaluationMetricsResource,
+ AsyncEvaluationMetricsResource,
+ EvaluationMetricsResourceWithRawResponse,
+ AsyncEvaluationMetricsResourceWithRawResponse,
+ EvaluationMetricsResourceWithStreamingResponse,
+ AsyncEvaluationMetricsResourceWithStreamingResponse,
+)
__all__ = ["AgentsResource", "AsyncAgentsResource"]
diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py b/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py
new file mode 100644
index 00000000..1c0ec1ea
--- /dev/null
+++ b/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .workspaces import (
+ WorkspacesResource,
+ AsyncWorkspacesResource,
+ WorkspacesResourceWithRawResponse,
+ AsyncWorkspacesResourceWithRawResponse,
+ WorkspacesResourceWithStreamingResponse,
+ AsyncWorkspacesResourceWithStreamingResponse,
+)
+from .evaluation_metrics import (
+ EvaluationMetricsResource,
+ AsyncEvaluationMetricsResource,
+ EvaluationMetricsResourceWithRawResponse,
+ AsyncEvaluationMetricsResourceWithRawResponse,
+ EvaluationMetricsResourceWithStreamingResponse,
+ AsyncEvaluationMetricsResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "WorkspacesResource",
+ "AsyncWorkspacesResource",
+ "WorkspacesResourceWithRawResponse",
+ "AsyncWorkspacesResourceWithRawResponse",
+ "WorkspacesResourceWithStreamingResponse",
+ "AsyncWorkspacesResourceWithStreamingResponse",
+ "EvaluationMetricsResource",
+ "AsyncEvaluationMetricsResource",
+ "EvaluationMetricsResourceWithRawResponse",
+ "AsyncEvaluationMetricsResourceWithRawResponse",
+ "EvaluationMetricsResourceWithStreamingResponse",
+ "AsyncEvaluationMetricsResourceWithStreamingResponse",
+]
diff --git a/src/do_gradientai/resources/agents/evaluation_metrics.py b/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py
similarity index 77%
rename from src/do_gradientai/resources/agents/evaluation_metrics.py
rename to src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py
index c554be3e..ce549527 100644
--- a/src/do_gradientai/resources/agents/evaluation_metrics.py
+++ b/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py
@@ -4,22 +4,34 @@
import httpx
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._compat import cached_property
-from ..._resource import SyncAPIResource, AsyncAPIResource
-from ..._response import (
+from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
to_raw_response_wrapper,
to_streamed_response_wrapper,
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
-from ..._base_client import make_request_options
-from ...types.agents.evaluation_metric_list_response import EvaluationMetricListResponse
+from ...._base_client import make_request_options
+from .workspaces.workspaces import (
+ WorkspacesResource,
+ AsyncWorkspacesResource,
+ WorkspacesResourceWithRawResponse,
+ AsyncWorkspacesResourceWithRawResponse,
+ WorkspacesResourceWithStreamingResponse,
+ AsyncWorkspacesResourceWithStreamingResponse,
+)
+from ....types.agents.evaluation_metric_list_response import EvaluationMetricListResponse
__all__ = ["EvaluationMetricsResource", "AsyncEvaluationMetricsResource"]
class EvaluationMetricsResource(SyncAPIResource):
+ @cached_property
+ def workspaces(self) -> WorkspacesResource:
+ return WorkspacesResource(self._client)
+
@cached_property
def with_raw_response(self) -> EvaluationMetricsResourceWithRawResponse:
"""
@@ -65,6 +77,10 @@ def list(
class AsyncEvaluationMetricsResource(AsyncAPIResource):
+ @cached_property
+ def workspaces(self) -> AsyncWorkspacesResource:
+ return AsyncWorkspacesResource(self._client)
+
@cached_property
def with_raw_response(self) -> AsyncEvaluationMetricsResourceWithRawResponse:
"""
@@ -117,6 +133,10 @@ def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None:
evaluation_metrics.list,
)
+ @cached_property
+ def workspaces(self) -> WorkspacesResourceWithRawResponse:
+ return WorkspacesResourceWithRawResponse(self._evaluation_metrics.workspaces)
+
class AsyncEvaluationMetricsResourceWithRawResponse:
def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None:
@@ -126,6 +146,10 @@ def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None:
evaluation_metrics.list,
)
+ @cached_property
+ def workspaces(self) -> AsyncWorkspacesResourceWithRawResponse:
+ return AsyncWorkspacesResourceWithRawResponse(self._evaluation_metrics.workspaces)
+
class EvaluationMetricsResourceWithStreamingResponse:
def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None:
@@ -135,6 +159,10 @@ def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None:
evaluation_metrics.list,
)
+ @cached_property
+ def workspaces(self) -> WorkspacesResourceWithStreamingResponse:
+ return WorkspacesResourceWithStreamingResponse(self._evaluation_metrics.workspaces)
+
class AsyncEvaluationMetricsResourceWithStreamingResponse:
def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None:
@@ -143,3 +171,7 @@ def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None:
self.list = async_to_streamed_response_wrapper(
evaluation_metrics.list,
)
+
+ @cached_property
+ def workspaces(self) -> AsyncWorkspacesResourceWithStreamingResponse:
+ return AsyncWorkspacesResourceWithStreamingResponse(self._evaluation_metrics.workspaces)
diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py
new file mode 100644
index 00000000..79d75f90
--- /dev/null
+++ b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .agents import (
+ AgentsResource,
+ AsyncAgentsResource,
+ AgentsResourceWithRawResponse,
+ AsyncAgentsResourceWithRawResponse,
+ AgentsResourceWithStreamingResponse,
+ AsyncAgentsResourceWithStreamingResponse,
+)
+from .workspaces import (
+ WorkspacesResource,
+ AsyncWorkspacesResource,
+ WorkspacesResourceWithRawResponse,
+ AsyncWorkspacesResourceWithRawResponse,
+ WorkspacesResourceWithStreamingResponse,
+ AsyncWorkspacesResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "AgentsResource",
+ "AsyncAgentsResource",
+ "AgentsResourceWithRawResponse",
+ "AsyncAgentsResourceWithRawResponse",
+ "AgentsResourceWithStreamingResponse",
+ "AsyncAgentsResourceWithStreamingResponse",
+ "WorkspacesResource",
+ "AsyncWorkspacesResource",
+ "WorkspacesResourceWithRawResponse",
+ "AsyncWorkspacesResourceWithRawResponse",
+ "WorkspacesResourceWithStreamingResponse",
+ "AsyncWorkspacesResourceWithStreamingResponse",
+]
diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py
new file mode 100644
index 00000000..1e11739f
--- /dev/null
+++ b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py
@@ -0,0 +1,324 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+
+import httpx
+
+from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ....._utils import maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ....._base_client import make_request_options
+from .....types.agents.evaluation_metrics.workspaces import agent_list_params, agent_move_params
+from .....types.agents.evaluation_metrics.workspaces.agent_list_response import AgentListResponse
+from .....types.agents.evaluation_metrics.workspaces.agent_move_response import AgentMoveResponse
+
+__all__ = ["AgentsResource", "AsyncAgentsResource"]
+
+
+class AgentsResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AgentsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AgentsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AgentsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AgentsResourceWithStreamingResponse(self)
+
+ def list(
+ self,
+ workspace_uuid: str,
+ *,
+ field_mask: agent_list_params.FieldMask | NotGiven = NOT_GIVEN,
+ only_deployed: bool | NotGiven = NOT_GIVEN,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AgentListResponse:
+ """
+ To list all agents by a Workspace, send a GET request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}/agents`.
+
+ Args:
+ only_deployed: Only list agents that are deployed.
+
+ page: page number.
+
+ per_page: items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not workspace_uuid:
+ raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/workspaces/{workspace_uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "field_mask": field_mask,
+ "only_deployed": only_deployed,
+ "page": page,
+ "per_page": per_page,
+ },
+ agent_list_params.AgentListParams,
+ ),
+ ),
+ cast_to=AgentListResponse,
+ )
+
+ def move(
+ self,
+ path_workspace_uuid: str,
+ *,
+ agent_uuids: List[str] | NotGiven = NOT_GIVEN,
+ body_workspace_uuid: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AgentMoveResponse:
+ """
+        To move all listed agents to a given workspace, send a PUT request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}/agents`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_workspace_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_workspace_uuid` but received {path_workspace_uuid!r}"
+ )
+ return self._put(
+ f"/v2/gen-ai/workspaces/{path_workspace_uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{path_workspace_uuid}/agents",
+ body=maybe_transform(
+ {
+ "agent_uuids": agent_uuids,
+ "body_workspace_uuid": body_workspace_uuid,
+ },
+ agent_move_params.AgentMoveParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AgentMoveResponse,
+ )
+
+
+class AsyncAgentsResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncAgentsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncAgentsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncAgentsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncAgentsResourceWithStreamingResponse(self)
+
+ async def list(
+ self,
+ workspace_uuid: str,
+ *,
+ field_mask: agent_list_params.FieldMask | NotGiven = NOT_GIVEN,
+ only_deployed: bool | NotGiven = NOT_GIVEN,
+ page: int | NotGiven = NOT_GIVEN,
+ per_page: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AgentListResponse:
+ """
+ To list all agents by a Workspace, send a GET request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}/agents`.
+
+ Args:
+ only_deployed: Only list agents that are deployed.
+
+ page: page number.
+
+ per_page: items per page.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not workspace_uuid:
+ raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/workspaces/{workspace_uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}/agents",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "field_mask": field_mask,
+ "only_deployed": only_deployed,
+ "page": page,
+ "per_page": per_page,
+ },
+ agent_list_params.AgentListParams,
+ ),
+ ),
+ cast_to=AgentListResponse,
+ )
+
+ async def move(
+ self,
+ path_workspace_uuid: str,
+ *,
+ agent_uuids: List[str] | NotGiven = NOT_GIVEN,
+ body_workspace_uuid: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AgentMoveResponse:
+ """
+        To move all listed agents to a given workspace, send a PUT request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}/agents`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_workspace_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_workspace_uuid` but received {path_workspace_uuid!r}"
+ )
+ return await self._put(
+ f"/v2/gen-ai/workspaces/{path_workspace_uuid}/agents"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{path_workspace_uuid}/agents",
+ body=await async_maybe_transform(
+ {
+ "agent_uuids": agent_uuids,
+ "body_workspace_uuid": body_workspace_uuid,
+ },
+ agent_move_params.AgentMoveParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AgentMoveResponse,
+ )
+
+
+class AgentsResourceWithRawResponse:
+ def __init__(self, agents: AgentsResource) -> None:
+ self._agents = agents
+
+ self.list = to_raw_response_wrapper(
+ agents.list,
+ )
+ self.move = to_raw_response_wrapper(
+ agents.move,
+ )
+
+
+class AsyncAgentsResourceWithRawResponse:
+ def __init__(self, agents: AsyncAgentsResource) -> None:
+ self._agents = agents
+
+ self.list = async_to_raw_response_wrapper(
+ agents.list,
+ )
+ self.move = async_to_raw_response_wrapper(
+ agents.move,
+ )
+
+
+class AgentsResourceWithStreamingResponse:
+ def __init__(self, agents: AgentsResource) -> None:
+ self._agents = agents
+
+ self.list = to_streamed_response_wrapper(
+ agents.list,
+ )
+ self.move = to_streamed_response_wrapper(
+ agents.move,
+ )
+
+
+class AsyncAgentsResourceWithStreamingResponse:
+ def __init__(self, agents: AsyncAgentsResource) -> None:
+ self._agents = agents
+
+ self.list = async_to_streamed_response_wrapper(
+ agents.list,
+ )
+ self.move = async_to_streamed_response_wrapper(
+ agents.move,
+ )
diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py
new file mode 100644
index 00000000..0f506118
--- /dev/null
+++ b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py
@@ -0,0 +1,654 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+
+import httpx
+
+from .agents import (
+ AgentsResource,
+ AsyncAgentsResource,
+ AgentsResourceWithRawResponse,
+ AsyncAgentsResourceWithRawResponse,
+ AgentsResourceWithStreamingResponse,
+ AsyncAgentsResourceWithStreamingResponse,
+)
+from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ....._utils import maybe_transform, async_maybe_transform
+from ....._compat import cached_property
+from ....._resource import SyncAPIResource, AsyncAPIResource
+from ....._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ....._base_client import make_request_options
+from .....types.agents.evaluation_metrics import workspace_create_params, workspace_update_params
+from .....types.agents.evaluation_metrics.workspace_list_response import WorkspaceListResponse
+from .....types.agents.evaluation_metrics.workspace_create_response import WorkspaceCreateResponse
+from .....types.agents.evaluation_metrics.workspace_delete_response import WorkspaceDeleteResponse
+from .....types.agents.evaluation_metrics.workspace_update_response import WorkspaceUpdateResponse
+from .....types.agents.evaluation_metrics.workspace_retrieve_response import WorkspaceRetrieveResponse
+from .....types.agents.evaluation_metrics.workspace_list_evaluation_test_cases_response import (
+ WorkspaceListEvaluationTestCasesResponse,
+)
+
+__all__ = ["WorkspacesResource", "AsyncWorkspacesResource"]
+
+
+class WorkspacesResource(SyncAPIResource):
+ @cached_property
+ def agents(self) -> AgentsResource:
+ return AgentsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> WorkspacesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return WorkspacesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> WorkspacesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return WorkspacesResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ agent_uuids: List[str] | NotGiven = NOT_GIVEN,
+ description: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> WorkspaceCreateResponse:
+ """To create a new workspace, send a POST request to `/v2/gen-ai/workspaces`.
+
+ The
+ response body contains a JSON object with the newly created workspace object.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/workspaces"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/workspaces",
+ body=maybe_transform(
+ {
+ "agent_uuids": agent_uuids,
+ "description": description,
+ "name": name,
+ },
+ workspace_create_params.WorkspaceCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ workspace_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> WorkspaceRetrieveResponse:
+ """
+        To retrieve details of a workspace, send a GET request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}`. The response body is a JSON object
+ containing the workspace.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not workspace_uuid:
+ raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/workspaces/{workspace_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceRetrieveResponse,
+ )
+
+ def update(
+ self,
+ path_workspace_uuid: str,
+ *,
+ description: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ body_workspace_uuid: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> WorkspaceUpdateResponse:
+ """
+ To update a workspace, send a PUT request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}`. The response body is a JSON object
+ containing the workspace.
+
+ Args:
+ body_workspace_uuid: Workspace UUID.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_workspace_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_workspace_uuid` but received {path_workspace_uuid!r}"
+ )
+ return self._put(
+ f"/v2/gen-ai/workspaces/{path_workspace_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{path_workspace_uuid}",
+ body=maybe_transform(
+ {
+ "description": description,
+ "name": name,
+ "body_workspace_uuid": body_workspace_uuid,
+ },
+ workspace_update_params.WorkspaceUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceUpdateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> WorkspaceListResponse:
+ """To list all workspaces, send a GET request to `/v2/gen-ai/workspaces`."""
+ return self._get(
+ "/v2/gen-ai/workspaces"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/workspaces",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceListResponse,
+ )
+
+ def delete(
+ self,
+ workspace_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> WorkspaceDeleteResponse:
+ """
+ To delete a workspace, send a DELETE request to
+        `/v2/gen-ai/workspaces/{workspace_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not workspace_uuid:
+ raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+ return self._delete(
+ f"/v2/gen-ai/workspaces/{workspace_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceDeleteResponse,
+ )
+
+ def list_evaluation_test_cases(
+ self,
+ workspace_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> WorkspaceListEvaluationTestCasesResponse:
+ """
+ To list all evaluation test cases by a workspace, send a GET request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not workspace_uuid:
+ raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceListEvaluationTestCasesResponse,
+ )
+
+
+class AsyncWorkspacesResource(AsyncAPIResource):
+ @cached_property
+ def agents(self) -> AsyncAgentsResource:
+ return AsyncAgentsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncWorkspacesResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncWorkspacesResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncWorkspacesResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response
+ """
+ return AsyncWorkspacesResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ agent_uuids: List[str] | NotGiven = NOT_GIVEN,
+ description: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> WorkspaceCreateResponse:
+ """To create a new workspace, send a POST request to `/v2/gen-ai/workspaces`.
+
+ The
+ response body contains a JSON object with the newly created workspace object.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/workspaces"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/workspaces",
+ body=await async_maybe_transform(
+ {
+ "agent_uuids": agent_uuids,
+ "description": description,
+ "name": name,
+ },
+ workspace_create_params.WorkspaceCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ workspace_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> WorkspaceRetrieveResponse:
+ """
+        To retrieve details of a workspace, send a GET request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}`. The response body is a JSON object
+ containing the workspace.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not workspace_uuid:
+ raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/workspaces/{workspace_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceRetrieveResponse,
+ )
+
+ async def update(
+ self,
+ path_workspace_uuid: str,
+ *,
+ description: str | NotGiven = NOT_GIVEN,
+ name: str | NotGiven = NOT_GIVEN,
+ body_workspace_uuid: str | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> WorkspaceUpdateResponse:
+ """
+ To update a workspace, send a PUT request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}`. The response body is a JSON object
+ containing the workspace.
+
+ Args:
+ body_workspace_uuid: Workspace UUID.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not path_workspace_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `path_workspace_uuid` but received {path_workspace_uuid!r}"
+ )
+ return await self._put(
+ f"/v2/gen-ai/workspaces/{path_workspace_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{path_workspace_uuid}",
+ body=await async_maybe_transform(
+ {
+ "description": description,
+ "name": name,
+ "body_workspace_uuid": body_workspace_uuid,
+ },
+ workspace_update_params.WorkspaceUpdateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceUpdateResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> WorkspaceListResponse:
+ """To list all workspaces, send a GET request to `/v2/gen-ai/workspaces`."""
+ return await self._get(
+ "/v2/gen-ai/workspaces"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/workspaces",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceListResponse,
+ )
+
+ async def delete(
+ self,
+ workspace_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> WorkspaceDeleteResponse:
+ """
+ To delete a workspace, send a DELETE request to
+        `/v2/gen-ai/workspaces/{workspace_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not workspace_uuid:
+ raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+ return await self._delete(
+ f"/v2/gen-ai/workspaces/{workspace_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceDeleteResponse,
+ )
+
+ async def list_evaluation_test_cases(
+ self,
+ workspace_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> WorkspaceListEvaluationTestCasesResponse:
+ """
+ To list all evaluation test cases by a workspace, send a GET request to
+ `/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not workspace_uuid:
+ raise ValueError(f"Expected a non-empty value for `workspace_uuid` but received {workspace_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/workspaces/{workspace_uuid}/evaluation_test_cases",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=WorkspaceListEvaluationTestCasesResponse,
+ )
+
+
+class WorkspacesResourceWithRawResponse:
+ def __init__(self, workspaces: WorkspacesResource) -> None:
+ self._workspaces = workspaces
+
+ self.create = to_raw_response_wrapper(
+ workspaces.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ workspaces.retrieve,
+ )
+ self.update = to_raw_response_wrapper(
+ workspaces.update,
+ )
+ self.list = to_raw_response_wrapper(
+ workspaces.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ workspaces.delete,
+ )
+ self.list_evaluation_test_cases = to_raw_response_wrapper(
+ workspaces.list_evaluation_test_cases,
+ )
+
+ @cached_property
+ def agents(self) -> AgentsResourceWithRawResponse:
+ return AgentsResourceWithRawResponse(self._workspaces.agents)
+
+
+class AsyncWorkspacesResourceWithRawResponse:
+ def __init__(self, workspaces: AsyncWorkspacesResource) -> None:
+ self._workspaces = workspaces
+
+ self.create = async_to_raw_response_wrapper(
+ workspaces.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ workspaces.retrieve,
+ )
+ self.update = async_to_raw_response_wrapper(
+ workspaces.update,
+ )
+ self.list = async_to_raw_response_wrapper(
+ workspaces.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ workspaces.delete,
+ )
+ self.list_evaluation_test_cases = async_to_raw_response_wrapper(
+ workspaces.list_evaluation_test_cases,
+ )
+
+ @cached_property
+ def agents(self) -> AsyncAgentsResourceWithRawResponse:
+ return AsyncAgentsResourceWithRawResponse(self._workspaces.agents)
+
+
+class WorkspacesResourceWithStreamingResponse:
+ def __init__(self, workspaces: WorkspacesResource) -> None:
+ self._workspaces = workspaces
+
+ self.create = to_streamed_response_wrapper(
+ workspaces.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ workspaces.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ workspaces.update,
+ )
+ self.list = to_streamed_response_wrapper(
+ workspaces.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ workspaces.delete,
+ )
+ self.list_evaluation_test_cases = to_streamed_response_wrapper(
+ workspaces.list_evaluation_test_cases,
+ )
+
+ @cached_property
+ def agents(self) -> AgentsResourceWithStreamingResponse:
+ return AgentsResourceWithStreamingResponse(self._workspaces.agents)
+
+
+class AsyncWorkspacesResourceWithStreamingResponse:
+ def __init__(self, workspaces: AsyncWorkspacesResource) -> None:
+ self._workspaces = workspaces
+
+ self.create = async_to_streamed_response_wrapper(
+ workspaces.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ workspaces.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ workspaces.update,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ workspaces.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ workspaces.delete,
+ )
+ self.list_evaluation_test_cases = async_to_streamed_response_wrapper(
+ workspaces.list_evaluation_test_cases,
+ )
+
+ @cached_property
+ def agents(self) -> AsyncAgentsResourceWithStreamingResponse:
+ return AsyncAgentsResourceWithStreamingResponse(self._workspaces.agents)
diff --git a/src/do_gradientai/resources/agents/evaluation_runs.py b/src/do_gradientai/resources/agents/evaluation_runs.py
index 7e207e7d..47045132 100644
--- a/src/do_gradientai/resources/agents/evaluation_runs.py
+++ b/src/do_gradientai/resources/agents/evaluation_runs.py
@@ -2,6 +2,8 @@
from __future__ import annotations
+from typing import List
+
import httpx
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
@@ -19,6 +21,7 @@
from ...types.agents.evaluation_run_create_response import EvaluationRunCreateResponse
from ...types.agents.evaluation_run_retrieve_response import EvaluationRunRetrieveResponse
from ...types.agents.evaluation_run_list_results_response import EvaluationRunListResultsResponse
+from ...types.agents.evaluation_run_retrieve_results_response import EvaluationRunRetrieveResultsResponse
__all__ = ["EvaluationRunsResource", "AsyncEvaluationRunsResource"]
@@ -46,7 +49,7 @@ def with_streaming_response(self) -> EvaluationRunsResourceWithStreamingResponse
def create(
self,
*,
- agent_uuid: str | NotGiven = NOT_GIVEN,
+ agent_uuids: List[str] | NotGiven = NOT_GIVEN,
run_name: str | NotGiven = NOT_GIVEN,
test_case_uuid: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -61,7 +64,7 @@ def create(
`/v2/gen-ai/evaluation_runs`.
Args:
- agent_uuid: Agent UUID to run the test case against.
+ agent_uuids: Agent UUIDs to run the test case against.
run_name: The name of the run.
@@ -79,7 +82,7 @@ def create(
else "https://api.digitalocean.com/v2/gen-ai/evaluation_runs",
body=maybe_transform(
{
- "agent_uuid": agent_uuid,
+ "agent_uuids": agent_uuids,
"run_name": run_name,
"test_case_uuid": test_case_uuid,
},
@@ -167,6 +170,45 @@ def list_results(
cast_to=EvaluationRunListResultsResponse,
)
+ def retrieve_results(
+ self,
+ prompt_id: int,
+ *,
+ evaluation_run_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> EvaluationRunRetrieveResultsResponse:
+ """
+ To retrieve results of an evaluation run, send a GET request to
+ `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not evaluation_run_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
+ )
+ return self._get(
+ f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationRunRetrieveResultsResponse,
+ )
+
class AsyncEvaluationRunsResource(AsyncAPIResource):
@cached_property
@@ -191,7 +233,7 @@ def with_streaming_response(self) -> AsyncEvaluationRunsResourceWithStreamingRes
async def create(
self,
*,
- agent_uuid: str | NotGiven = NOT_GIVEN,
+ agent_uuids: List[str] | NotGiven = NOT_GIVEN,
run_name: str | NotGiven = NOT_GIVEN,
test_case_uuid: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -206,7 +248,7 @@ async def create(
`/v2/gen-ai/evaluation_runs`.
Args:
- agent_uuid: Agent UUID to run the test case against.
+ agent_uuids: Agent UUIDs to run the test case against.
run_name: The name of the run.
@@ -224,7 +266,7 @@ async def create(
else "https://api.digitalocean.com/v2/gen-ai/evaluation_runs",
body=await async_maybe_transform(
{
- "agent_uuid": agent_uuid,
+ "agent_uuids": agent_uuids,
"run_name": run_name,
"test_case_uuid": test_case_uuid,
},
@@ -312,6 +354,45 @@ async def list_results(
cast_to=EvaluationRunListResultsResponse,
)
+ async def retrieve_results(
+ self,
+ prompt_id: int,
+ *,
+ evaluation_run_uuid: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> EvaluationRunRetrieveResultsResponse:
+ """
+ To retrieve results of an evaluation run, send a GET request to
+ `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not evaluation_run_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `evaluation_run_uuid` but received {evaluation_run_uuid!r}"
+ )
+ return await self._get(
+ f"/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results/{prompt_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=EvaluationRunRetrieveResultsResponse,
+ )
+
class EvaluationRunsResourceWithRawResponse:
def __init__(self, evaluation_runs: EvaluationRunsResource) -> None:
@@ -326,6 +407,9 @@ def __init__(self, evaluation_runs: EvaluationRunsResource) -> None:
self.list_results = to_raw_response_wrapper(
evaluation_runs.list_results,
)
+ self.retrieve_results = to_raw_response_wrapper(
+ evaluation_runs.retrieve_results,
+ )
class AsyncEvaluationRunsResourceWithRawResponse:
@@ -341,6 +425,9 @@ def __init__(self, evaluation_runs: AsyncEvaluationRunsResource) -> None:
self.list_results = async_to_raw_response_wrapper(
evaluation_runs.list_results,
)
+ self.retrieve_results = async_to_raw_response_wrapper(
+ evaluation_runs.retrieve_results,
+ )
class EvaluationRunsResourceWithStreamingResponse:
@@ -356,6 +443,9 @@ def __init__(self, evaluation_runs: EvaluationRunsResource) -> None:
self.list_results = to_streamed_response_wrapper(
evaluation_runs.list_results,
)
+ self.retrieve_results = to_streamed_response_wrapper(
+ evaluation_runs.retrieve_results,
+ )
class AsyncEvaluationRunsResourceWithStreamingResponse:
@@ -371,3 +461,6 @@ def __init__(self, evaluation_runs: AsyncEvaluationRunsResource) -> None:
self.list_results = async_to_streamed_response_wrapper(
evaluation_runs.list_results,
)
+ self.retrieve_results = async_to_streamed_response_wrapper(
+ evaluation_runs.retrieve_results,
+ )
diff --git a/src/do_gradientai/resources/agents/evaluation_test_cases.py b/src/do_gradientai/resources/agents/evaluation_test_cases.py
index 995df025..beff8752 100644
--- a/src/do_gradientai/resources/agents/evaluation_test_cases.py
+++ b/src/do_gradientai/resources/agents/evaluation_test_cases.py
@@ -20,6 +20,7 @@
from ...types.agents import (
evaluation_test_case_create_params,
evaluation_test_case_update_params,
+ evaluation_test_case_retrieve_params,
evaluation_test_case_list_evaluation_runs_params,
)
from ...types.agents.api_star_metric_param import APIStarMetricParam
@@ -118,6 +119,7 @@ def retrieve(
self,
test_case_uuid: str,
*,
+ evaluation_test_case_version: int | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -130,6 +132,8 @@ def retrieve(
request to `/v2/gen-ai/evaluation_test_case/{test_case_uuid}`.
Args:
+ evaluation_test_case_version: Version of the test case.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -145,7 +149,14 @@ def retrieve(
if self._client._base_url_overridden
else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{test_case_uuid}",
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {"evaluation_test_case_version": evaluation_test_case_version},
+ evaluation_test_case_retrieve_params.EvaluationTestCaseRetrieveParams,
+ ),
),
cast_to=EvaluationTestCaseRetrieveResponse,
)
@@ -368,6 +379,7 @@ async def retrieve(
self,
test_case_uuid: str,
*,
+ evaluation_test_case_version: int | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -380,6 +392,8 @@ async def retrieve(
request to `/v2/gen-ai/evaluation_test_case/{test_case_uuid}`.
Args:
+ evaluation_test_case_version: Version of the test case.
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -395,7 +409,14 @@ async def retrieve(
if self._client._base_url_overridden
else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{test_case_uuid}",
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {"evaluation_test_case_version": evaluation_test_case_version},
+ evaluation_test_case_retrieve_params.EvaluationTestCaseRetrieveParams,
+ ),
),
cast_to=EvaluationTestCaseRetrieveResponse,
)
diff --git a/src/do_gradientai/resources/models.py b/src/do_gradientai/resources/models.py
index c8e78b9b..da5462ae 100644
--- a/src/do_gradientai/resources/models.py
+++ b/src/do_gradientai/resources/models.py
@@ -2,14 +2,9 @@
from __future__ import annotations
-from typing import List
-from typing_extensions import Literal
-
import httpx
-from ..types import model_list_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -18,6 +13,7 @@
async_to_raw_response_wrapper,
async_to_streamed_response_wrapper,
)
+from ..types.model import Model
from .._base_client import make_request_options
from ..types.model_list_response import ModelListResponse
@@ -44,52 +40,22 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse:
"""
return ModelsResourceWithStreamingResponse(self)
- def list(
+ def retrieve(
self,
+ model: str,
*,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- public_only: bool | NotGiven = NOT_GIVEN,
- usecases: List[
- Literal[
- "MODEL_USECASE_UNKNOWN",
- "MODEL_USECASE_AGENT",
- "MODEL_USECASE_FINETUNED",
- "MODEL_USECASE_KNOWLEDGEBASE",
- "MODEL_USECASE_GUARDRAIL",
- "MODEL_USECASE_REASONING",
- "MODEL_USECASE_SERVERLESS",
- ]
- ]
- | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModelListResponse:
+ ) -> Model:
"""
- To list all models, send a GET request to `/v2/gen-ai/models`.
+ Retrieves a model instance, providing basic information about the model such as
+ the owner and permissioning.
Args:
- page: page number.
-
- per_page: items per page.
-
- public_only: only include models that are publicly available.
-
- usecases: include only models defined for the listed usecases.
-
- - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
- - MODEL_USECASE_AGENT: The model maybe used in an agent
- - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning
- - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases
- (embedding models)
- - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails
- - MODEL_USECASE_REASONING: The model usecase for reasoning
- - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference
-
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -98,24 +64,36 @@ def list(
timeout: Override the client-level default timeout for this request, in seconds
"""
+ if not model:
+ raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
return self._get(
- "/v2/gen-ai/models"
+ f"/models/{model}"
if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/models",
+ else f"https://inference.do-ai.run/v1/models/{model}",
options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- "public_only": public_only,
- "usecases": usecases,
- },
- model_list_params.ModelListParams,
- ),
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Model,
+ )
+
+ def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ModelListResponse:
+ """
+ Lists the currently available models, and provides basic information about each
+ one such as the owner and availability.
+ """
+ return self._get(
+ "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ModelListResponse,
)
@@ -141,52 +119,22 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse:
"""
return AsyncModelsResourceWithStreamingResponse(self)
- async def list(
+ async def retrieve(
self,
+ model: str,
*,
- page: int | NotGiven = NOT_GIVEN,
- per_page: int | NotGiven = NOT_GIVEN,
- public_only: bool | NotGiven = NOT_GIVEN,
- usecases: List[
- Literal[
- "MODEL_USECASE_UNKNOWN",
- "MODEL_USECASE_AGENT",
- "MODEL_USECASE_FINETUNED",
- "MODEL_USECASE_KNOWLEDGEBASE",
- "MODEL_USECASE_GUARDRAIL",
- "MODEL_USECASE_REASONING",
- "MODEL_USECASE_SERVERLESS",
- ]
- ]
- | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> ModelListResponse:
+ ) -> Model:
"""
- To list all models, send a GET request to `/v2/gen-ai/models`.
+ Retrieves a model instance, providing basic information about the model such as
+ the owner and permissioning.
Args:
- page: page number.
-
- per_page: items per page.
-
- public_only: only include models that are publicly available.
-
- usecases: include only models defined for the listed usecases.
-
- - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
- - MODEL_USECASE_AGENT: The model maybe used in an agent
- - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning
- - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases
- (embedding models)
- - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails
- - MODEL_USECASE_REASONING: The model usecase for reasoning
- - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference
-
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -195,24 +143,36 @@ async def list(
timeout: Override the client-level default timeout for this request, in seconds
"""
+ if not model:
+ raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
return await self._get(
- "/v2/gen-ai/models"
+ f"/models/{model}"
if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/models",
+ else f"https://inference.do-ai.run/v1/models/{model}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Model,
+ )
+
+ async def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ModelListResponse:
+ """
+ Lists the currently available models, and provides basic information about each
+ one such as the owner and availability.
+ """
+ return await self._get(
+ "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models",
options=make_request_options(
- extra_headers=extra_headers,
- extra_query=extra_query,
- extra_body=extra_body,
- timeout=timeout,
- query=await async_maybe_transform(
- {
- "page": page,
- "per_page": per_page,
- "public_only": public_only,
- "usecases": usecases,
- },
- model_list_params.ModelListParams,
- ),
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ModelListResponse,
)
@@ -222,6 +182,9 @@ class ModelsResourceWithRawResponse:
def __init__(self, models: ModelsResource) -> None:
self._models = models
+ self.retrieve = to_raw_response_wrapper(
+ models.retrieve,
+ )
self.list = to_raw_response_wrapper(
models.list,
)
@@ -231,6 +194,9 @@ class AsyncModelsResourceWithRawResponse:
def __init__(self, models: AsyncModelsResource) -> None:
self._models = models
+ self.retrieve = async_to_raw_response_wrapper(
+ models.retrieve,
+ )
self.list = async_to_raw_response_wrapper(
models.list,
)
@@ -240,6 +206,9 @@ class ModelsResourceWithStreamingResponse:
def __init__(self, models: ModelsResource) -> None:
self._models = models
+ self.retrieve = to_streamed_response_wrapper(
+ models.retrieve,
+ )
self.list = to_streamed_response_wrapper(
models.list,
)
@@ -249,6 +218,9 @@ class AsyncModelsResourceWithStreamingResponse:
def __init__(self, models: AsyncModelsResource) -> None:
self._models = models
+ self.retrieve = async_to_streamed_response_wrapper(
+ models.retrieve,
+ )
self.list = async_to_streamed_response_wrapper(
models.list,
)
diff --git a/src/do_gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py
index 23cf1802..626c3840 100644
--- a/src/do_gradientai/types/__init__.py
+++ b/src/do_gradientai/types/__init__.py
@@ -2,15 +2,14 @@
from __future__ import annotations
+from .model import Model as Model
from .shared import APIMeta as APIMeta, APILinks as APILinks
from .api_agent import APIAgent as APIAgent
-from .api_model import APIModel as APIModel
from .api_agreement import APIAgreement as APIAgreement
from .api_workspace import APIWorkspace as APIWorkspace
from .api_agent_model import APIAgentModel as APIAgentModel
from .agent_list_params import AgentListParams as AgentListParams
from .api_model_version import APIModelVersion as APIModelVersion
-from .model_list_params import ModelListParams as ModelListParams
from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase
from .region_list_params import RegionListParams as RegionListParams
from .agent_create_params import AgentCreateParams as AgentCreateParams
diff --git a/src/do_gradientai/types/agents/__init__.py b/src/do_gradientai/types/agents/__init__.py
index 0060e12c..9c6508f6 100644
--- a/src/do_gradientai/types/agents/__init__.py
+++ b/src/do_gradientai/types/agents/__init__.py
@@ -45,10 +45,14 @@
from .evaluation_test_case_update_params import EvaluationTestCaseUpdateParams as EvaluationTestCaseUpdateParams
from .evaluation_run_list_results_response import EvaluationRunListResultsResponse as EvaluationRunListResultsResponse
from .evaluation_test_case_create_response import EvaluationTestCaseCreateResponse as EvaluationTestCaseCreateResponse
+from .evaluation_test_case_retrieve_params import EvaluationTestCaseRetrieveParams as EvaluationTestCaseRetrieveParams
from .evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse as EvaluationTestCaseUpdateResponse
from .evaluation_test_case_retrieve_response import (
EvaluationTestCaseRetrieveResponse as EvaluationTestCaseRetrieveResponse,
)
+from .evaluation_run_retrieve_results_response import (
+ EvaluationRunRetrieveResultsResponse as EvaluationRunRetrieveResultsResponse,
+)
from .evaluation_test_case_list_evaluation_runs_params import (
EvaluationTestCaseListEvaluationRunsParams as EvaluationTestCaseListEvaluationRunsParams,
)
diff --git a/src/do_gradientai/types/agents/api_evaluation_run.py b/src/do_gradientai/types/agents/api_evaluation_run.py
index ae046d3e..b879f756 100644
--- a/src/do_gradientai/types/agents/api_evaluation_run.py
+++ b/src/do_gradientai/types/agents/api_evaluation_run.py
@@ -11,11 +11,23 @@
class APIEvaluationRun(BaseModel):
+ agent_deleted: Optional[bool] = None
+
+ agent_name: Optional[str] = None
+
agent_uuid: Optional[str] = None
"""Agent UUID."""
agent_version_hash: Optional[str] = None
+ agent_workspace_uuid: Optional[str] = None
+
+ created_by_user_email: Optional[str] = None
+
+ created_by_user_id: Optional[str] = None
+
+ error_description: Optional[str] = None
+
evaluation_run_uuid: Optional[str] = None
"""Evaluation run UUID."""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/__init__.py b/src/do_gradientai/types/agents/evaluation_metrics/__init__.py
new file mode 100644
index 00000000..7af9b074
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/__init__.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .workspace_create_params import WorkspaceCreateParams as WorkspaceCreateParams
+from .workspace_list_response import WorkspaceListResponse as WorkspaceListResponse
+from .workspace_update_params import WorkspaceUpdateParams as WorkspaceUpdateParams
+from .workspace_create_response import WorkspaceCreateResponse as WorkspaceCreateResponse
+from .workspace_delete_response import WorkspaceDeleteResponse as WorkspaceDeleteResponse
+from .workspace_update_response import WorkspaceUpdateResponse as WorkspaceUpdateResponse
+from .workspace_retrieve_response import WorkspaceRetrieveResponse as WorkspaceRetrieveResponse
+from .workspace_list_evaluation_test_cases_response import (
+ WorkspaceListEvaluationTestCasesResponse as WorkspaceListEvaluationTestCasesResponse,
+)
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py
new file mode 100644
index 00000000..73f390be
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import TypedDict
+
+__all__ = ["WorkspaceCreateParams"]
+
+
+class WorkspaceCreateParams(TypedDict, total=False):
+ agent_uuids: List[str]
+
+ description: str
+
+ name: str
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py
new file mode 100644
index 00000000..419ec288
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from ...._models import BaseModel
+
+__all__ = ["WorkspaceCreateResponse"]
+
+
+class WorkspaceCreateResponse(BaseModel):
+ workspace: Optional["APIWorkspace"] = None
+
+
+from ...api_workspace import APIWorkspace
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py
new file mode 100644
index 00000000..1fe7b5a2
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ...._models import BaseModel
+
+__all__ = ["WorkspaceDeleteResponse"]
+
+
+class WorkspaceDeleteResponse(BaseModel):
+ workspace_uuid: Optional[str] = None
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py
new file mode 100644
index 00000000..32c613f8
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+from ..api_evaluation_test_case import APIEvaluationTestCase
+
+__all__ = ["WorkspaceListEvaluationTestCasesResponse"]
+
+
+class WorkspaceListEvaluationTestCasesResponse(BaseModel):
+ evaluation_test_cases: Optional[List[APIEvaluationTestCase]] = None
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py
new file mode 100644
index 00000000..64f9a63c
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ...._models import BaseModel
+
+__all__ = ["WorkspaceListResponse"]
+
+
+class WorkspaceListResponse(BaseModel):
+ workspaces: Optional[List["APIWorkspace"]] = None
+
+
+from ...api_workspace import APIWorkspace
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py
new file mode 100644
index 00000000..fa4a567c
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from ...._models import BaseModel
+
+__all__ = ["WorkspaceRetrieveResponse"]
+
+
+class WorkspaceRetrieveResponse(BaseModel):
+ workspace: Optional["APIWorkspace"] = None
+
+
+from ...api_workspace import APIWorkspace
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py
new file mode 100644
index 00000000..fd09079e
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Annotated, TypedDict
+
+from ...._utils import PropertyInfo
+
+__all__ = ["WorkspaceUpdateParams"]
+
+
+class WorkspaceUpdateParams(TypedDict, total=False):
+ description: str
+
+ name: str
+
+ body_workspace_uuid: Annotated[str, PropertyInfo(alias="workspace_uuid")]
+ """Workspace UUID."""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py
new file mode 100644
index 00000000..77dac88c
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from ...._models import BaseModel
+
+__all__ = ["WorkspaceUpdateResponse"]
+
+
+class WorkspaceUpdateResponse(BaseModel):
+ workspace: Optional["APIWorkspace"] = None
+
+
+from ...api_workspace import APIWorkspace
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py
new file mode 100644
index 00000000..9f369c7c
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py
@@ -0,0 +1,8 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .agent_list_params import AgentListParams as AgentListParams
+from .agent_move_params import AgentMoveParams as AgentMoveParams
+from .agent_list_response import AgentListResponse as AgentListResponse
+from .agent_move_response import AgentMoveResponse as AgentMoveResponse
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py
new file mode 100644
index 00000000..277274ed
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import TypedDict
+
+__all__ = ["AgentListParams", "FieldMask"]
+
+
+class AgentListParams(TypedDict, total=False):
+ field_mask: FieldMask
+
+ only_deployed: bool
+ """Only list agents that are deployed."""
+
+ page: int
+ """page number."""
+
+ per_page: int
+ """items per page."""
+
+
+class FieldMask(TypedDict, total=False):
+ paths: List[str]
+ """The set of field mask paths."""
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py
new file mode 100644
index 00000000..1e520736
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from ....._models import BaseModel
+from ....shared.api_meta import APIMeta
+from ....shared.api_links import APILinks
+
+__all__ = ["AgentListResponse"]
+
+
+class AgentListResponse(BaseModel):
+ agents: Optional[List["APIAgent"]] = None
+
+ links: Optional[APILinks] = None
+
+ meta: Optional[APIMeta] = None
+
+
+from ....api_agent import APIAgent
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py
new file mode 100644
index 00000000..8e92503a
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Annotated, TypedDict
+
+from ....._utils import PropertyInfo
+
+__all__ = ["AgentMoveParams"]
+
+
+class AgentMoveParams(TypedDict, total=False):
+ agent_uuids: List[str]
+
+ body_workspace_uuid: Annotated[str, PropertyInfo(alias="workspace_uuid")]
diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py
new file mode 100644
index 00000000..d2d084d5
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+
+from ....._models import BaseModel
+
+__all__ = ["AgentMoveResponse"]
+
+
+class AgentMoveResponse(BaseModel):
+ workspace: Optional["APIWorkspace"] = None
+
+
+from ....api_workspace import APIWorkspace
diff --git a/src/do_gradientai/types/agents/evaluation_run_create_params.py b/src/do_gradientai/types/agents/evaluation_run_create_params.py
index 1ae2dbbb..47bdabd6 100644
--- a/src/do_gradientai/types/agents/evaluation_run_create_params.py
+++ b/src/do_gradientai/types/agents/evaluation_run_create_params.py
@@ -2,14 +2,15 @@
from __future__ import annotations
+from typing import List
from typing_extensions import TypedDict
__all__ = ["EvaluationRunCreateParams"]
class EvaluationRunCreateParams(TypedDict, total=False):
- agent_uuid: str
- """Agent UUID to run the test case against."""
+ agent_uuids: List[str]
+ """Agent UUIDs to run the test case against."""
run_name: str
"""The name of the run."""
diff --git a/src/do_gradientai/types/agents/evaluation_run_create_response.py b/src/do_gradientai/types/agents/evaluation_run_create_response.py
index 36942c29..90da2e61 100644
--- a/src/do_gradientai/types/agents/evaluation_run_create_response.py
+++ b/src/do_gradientai/types/agents/evaluation_run_create_response.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
+from typing import List, Optional
from ..._models import BaseModel
@@ -8,4 +8,4 @@
class EvaluationRunCreateResponse(BaseModel):
- evaluation_run_uuid: Optional[str] = None
+ evaluation_run_uuids: Optional[List[str]] = None
diff --git a/src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py b/src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py
new file mode 100644
index 00000000..4bb70732
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+from .api_evaluation_prompt import APIEvaluationPrompt
+
+__all__ = ["EvaluationRunRetrieveResultsResponse"]
+
+
+class EvaluationRunRetrieveResultsResponse(BaseModel):
+ prompt: Optional[APIEvaluationPrompt] = None
diff --git a/src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py b/src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py
new file mode 100644
index 00000000..f84fe876
--- /dev/null
+++ b/src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["EvaluationTestCaseRetrieveParams"]
+
+
+class EvaluationTestCaseRetrieveParams(TypedDict, total=False):
+ evaluation_test_case_version: int
+ """Version of the test case."""
diff --git a/src/do_gradientai/types/api_model.py b/src/do_gradientai/types/api_model.py
deleted file mode 100644
index c2bc1edd..00000000
--- a/src/do_gradientai/types/api_model.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-from datetime import datetime
-
-from .._models import BaseModel
-from .api_agreement import APIAgreement
-from .api_model_version import APIModelVersion
-
-__all__ = ["APIModel"]
-
-
-class APIModel(BaseModel):
- agreement: Optional[APIAgreement] = None
-
- created_at: Optional[datetime] = None
-
- is_foundational: Optional[bool] = None
-
- name: Optional[str] = None
-
- parent_uuid: Optional[str] = None
-
- updated_at: Optional[datetime] = None
-
- upload_complete: Optional[bool] = None
-
- url: Optional[str] = None
-
- uuid: Optional[str] = None
-
- version: Optional[APIModelVersion] = None
diff --git a/src/do_gradientai/types/knowledge_bases/__init__.py b/src/do_gradientai/types/knowledge_bases/__init__.py
index 9fc915e5..b23053f2 100644
--- a/src/do_gradientai/types/knowledge_bases/__init__.py
+++ b/src/do_gradientai/types/knowledge_bases/__init__.py
@@ -5,6 +5,7 @@
from .api_indexing_job import APIIndexingJob as APIIndexingJob
from .aws_data_source_param import AwsDataSourceParam as AwsDataSourceParam
from .api_spaces_data_source import APISpacesDataSource as APISpacesDataSource
+from .api_indexed_data_source import APIIndexedDataSource as APIIndexedDataSource
from .data_source_list_params import DataSourceListParams as DataSourceListParams
from .indexing_job_list_params import IndexingJobListParams as IndexingJobListParams
from .data_source_create_params import DataSourceCreateParams as DataSourceCreateParams
diff --git a/src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py b/src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py
new file mode 100644
index 00000000..2449e9fd
--- /dev/null
+++ b/src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["APIIndexedDataSource"]
+
+
+class APIIndexedDataSource(BaseModel):
+ completed_at: Optional[datetime] = None
+
+ data_source_uuid: Optional[str] = None
+
+ error_details: Optional[str] = None
+
+ error_msg: Optional[str] = None
+
+ failed_item_count: Optional[str] = None
+
+ indexed_file_count: Optional[str] = None
+
+ indexed_item_count: Optional[str] = None
+
+ removed_item_count: Optional[str] = None
+
+ skipped_item_count: Optional[str] = None
+
+ started_at: Optional[datetime] = None
+
+ status: Optional[
+ Literal[
+ "DATA_SOURCE_STATUS_UNKNOWN",
+ "DATA_SOURCE_STATUS_IN_PROGRESS",
+ "DATA_SOURCE_STATUS_UPDATED",
+ "DATA_SOURCE_STATUS_PARTIALLY_UPDATED",
+ "DATA_SOURCE_STATUS_NOT_UPDATED",
+ "DATA_SOURCE_STATUS_FAILED",
+ ]
+ ] = None
+
+ total_bytes: Optional[str] = None
+
+ total_bytes_indexed: Optional[str] = None
+
+ total_file_count: Optional[str] = None
diff --git a/src/do_gradientai/types/knowledge_bases/api_indexing_job.py b/src/do_gradientai/types/knowledge_bases/api_indexing_job.py
index 2809141c..573a7c4e 100644
--- a/src/do_gradientai/types/knowledge_bases/api_indexing_job.py
+++ b/src/do_gradientai/types/knowledge_bases/api_indexing_job.py
@@ -34,6 +34,18 @@ class APIIndexingJob(BaseModel):
started_at: Optional[datetime] = None
+ status: Optional[
+ Literal[
+ "INDEX_JOB_STATUS_UNKNOWN",
+ "INDEX_JOB_STATUS_PARTIAL",
+ "INDEX_JOB_STATUS_IN_PROGRESS",
+ "INDEX_JOB_STATUS_COMPLETED",
+ "INDEX_JOB_STATUS_FAILED",
+ "INDEX_JOB_STATUS_NO_CHANGES",
+ "INDEX_JOB_STATUS_PENDING",
+ ]
+ ] = None
+
tokens: Optional[int] = None
total_datasources: Optional[int] = None
diff --git a/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
index ca24d6f0..202e4202 100644
--- a/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
+++ b/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py
@@ -6,6 +6,7 @@
from ..._models import BaseModel
from .api_indexing_job import APIIndexingJob
from .api_spaces_data_source import APISpacesDataSource
+from .api_indexed_data_source import APIIndexedDataSource
from .api_file_upload_data_source import APIFileUploadDataSource
from .api_web_crawler_data_source import APIWebCrawlerDataSource
@@ -32,6 +33,8 @@ class APIKnowledgeBaseDataSource(BaseModel):
item_path: Optional[str] = None
+ last_datasource_indexing_job: Optional[APIIndexedDataSource] = None
+
last_indexing_job: Optional[APIIndexingJob] = None
region: Optional[str] = None
diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py
index a9d0c2c0..dd0e317e 100644
--- a/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py
+++ b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py
@@ -1,52 +1,12 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import List, Optional
-from datetime import datetime
-from typing_extensions import Literal
from ..._models import BaseModel
+from .api_indexed_data_source import APIIndexedDataSource
-__all__ = ["IndexingJobRetrieveDataSourcesResponse", "IndexedDataSource"]
-
-
-class IndexedDataSource(BaseModel):
- completed_at: Optional[datetime] = None
-
- data_source_uuid: Optional[str] = None
-
- error_details: Optional[str] = None
-
- error_msg: Optional[str] = None
-
- failed_item_count: Optional[str] = None
-
- indexed_file_count: Optional[str] = None
-
- indexed_item_count: Optional[str] = None
-
- removed_item_count: Optional[str] = None
-
- skipped_item_count: Optional[str] = None
-
- started_at: Optional[datetime] = None
-
- status: Optional[
- Literal[
- "DATA_SOURCE_STATUS_UNKNOWN",
- "DATA_SOURCE_STATUS_IN_PROGRESS",
- "DATA_SOURCE_STATUS_UPDATED",
- "DATA_SOURCE_STATUS_PARTIALLY_UPDATED",
- "DATA_SOURCE_STATUS_NOT_UPDATED",
- "DATA_SOURCE_STATUS_FAILED",
- ]
- ] = None
-
- total_bytes: Optional[str] = None
-
- total_bytes_indexed: Optional[str] = None
-
- total_file_count: Optional[str] = None
+__all__ = ["IndexingJobRetrieveDataSourcesResponse"]
class IndexingJobRetrieveDataSourcesResponse(BaseModel):
- indexed_data_sources: Optional[List[IndexedDataSource]] = None
+ indexed_data_sources: Optional[List[APIIndexedDataSource]] = None
diff --git a/src/do_gradientai/types/model.py b/src/do_gradientai/types/model.py
new file mode 100644
index 00000000..2631ee8d
--- /dev/null
+++ b/src/do_gradientai/types/model.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["Model"]
+
+
+class Model(BaseModel):
+ id: str
+ """The model identifier, which can be referenced in the API endpoints."""
+
+ created: int
+ """The Unix timestamp (in seconds) when the model was created."""
+
+ object: Literal["model"]
+ """The object type, which is always "model"."""
+
+ owned_by: str
+ """The organization that owns the model."""
diff --git a/src/do_gradientai/types/model_list_params.py b/src/do_gradientai/types/model_list_params.py
deleted file mode 100644
index 4abc1dc1..00000000
--- a/src/do_gradientai/types/model_list_params.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import List
-from typing_extensions import Literal, TypedDict
-
-__all__ = ["ModelListParams"]
-
-
-class ModelListParams(TypedDict, total=False):
- page: int
- """page number."""
-
- per_page: int
- """items per page."""
-
- public_only: bool
- """only include models that are publicly available."""
-
- usecases: List[
- Literal[
- "MODEL_USECASE_UNKNOWN",
- "MODEL_USECASE_AGENT",
- "MODEL_USECASE_FINETUNED",
- "MODEL_USECASE_KNOWLEDGEBASE",
- "MODEL_USECASE_GUARDRAIL",
- "MODEL_USECASE_REASONING",
- "MODEL_USECASE_SERVERLESS",
- ]
- ]
- """include only models defined for the listed usecases.
-
- - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
- - MODEL_USECASE_AGENT: The model maybe used in an agent
- - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning
- - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases
- (embedding models)
- - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails
- - MODEL_USECASE_REASONING: The model usecase for reasoning
- - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference
- """
diff --git a/src/do_gradientai/types/model_list_response.py b/src/do_gradientai/types/model_list_response.py
index 47651759..8f835449 100644
--- a/src/do_gradientai/types/model_list_response.py
+++ b/src/do_gradientai/types/model_list_response.py
@@ -1,18 +1,15 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Optional
+from typing import List
+from typing_extensions import Literal
+from .model import Model
from .._models import BaseModel
-from .api_model import APIModel
-from .shared.api_meta import APIMeta
-from .shared.api_links import APILinks
__all__ = ["ModelListResponse"]
class ModelListResponse(BaseModel):
- links: Optional[APILinks] = None
+ data: List[Model]
- meta: Optional[APIMeta] = None
-
- models: Optional[List[APIModel]] = None
+ object: Literal["list"]
diff --git a/tests/api_resources/agents/evaluation_metrics/__init__.py b/tests/api_resources/agents/evaluation_metrics/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/agents/evaluation_metrics/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py
new file mode 100644
index 00000000..42bfa79f
--- /dev/null
+++ b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py
@@ -0,0 +1,521 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents.evaluation_metrics import (
+ WorkspaceListResponse,
+ WorkspaceCreateResponse,
+ WorkspaceDeleteResponse,
+ WorkspaceUpdateResponse,
+ WorkspaceRetrieveResponse,
+ WorkspaceListEvaluationTestCasesResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestWorkspaces:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_create(self, client: GradientAI) -> None:
+ workspace = client.agents.evaluation_metrics.workspaces.create()
+ assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_create_with_all_params(self, client: GradientAI) -> None:
+ workspace = client.agents.evaluation_metrics.workspaces.create(
+ agent_uuids=["string"],
+ description="description",
+ name="name",
+ )
+ assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_create(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.workspaces.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = response.parse()
+ assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_create(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.workspaces.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = response.parse()
+ assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_retrieve(self, client: GradientAI) -> None:
+ workspace = client.agents.evaluation_metrics.workspaces.retrieve(
+ "workspace_uuid",
+ )
+ assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_retrieve(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve(
+ "workspace_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = response.parse()
+ assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.workspaces.with_streaming_response.retrieve(
+ "workspace_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = response.parse()
+ assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_retrieve(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
+ client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_update(self, client: GradientAI) -> None:
+ workspace = client.agents.evaluation_metrics.workspaces.update(
+ path_workspace_uuid="workspace_uuid",
+ )
+ assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_update_with_all_params(self, client: GradientAI) -> None:
+ workspace = client.agents.evaluation_metrics.workspaces.update(
+ path_workspace_uuid="workspace_uuid",
+ description="description",
+ name="name",
+ body_workspace_uuid="workspace_uuid",
+ )
+ assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_update(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.workspaces.with_raw_response.update(
+ path_workspace_uuid="workspace_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = response.parse()
+ assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_update(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.workspaces.with_streaming_response.update(
+ path_workspace_uuid="workspace_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = response.parse()
+ assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_update(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"):
+ client.agents.evaluation_metrics.workspaces.with_raw_response.update(
+ path_workspace_uuid="",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list(self, client: GradientAI) -> None:
+ workspace = client.agents.evaluation_metrics.workspaces.list()
+ assert_matches_type(WorkspaceListResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_list(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.workspaces.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = response.parse()
+ assert_matches_type(WorkspaceListResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_list(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.workspaces.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = response.parse()
+ assert_matches_type(WorkspaceListResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_delete(self, client: GradientAI) -> None:
+ workspace = client.agents.evaluation_metrics.workspaces.delete(
+ "workspace_uuid",
+ )
+ assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_delete(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.workspaces.with_raw_response.delete(
+ "workspace_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = response.parse()
+ assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_delete(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.workspaces.with_streaming_response.delete(
+ "workspace_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = response.parse()
+ assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_delete(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
+ client.agents.evaluation_metrics.workspaces.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_evaluation_test_cases(self, client: GradientAI) -> None:
+ workspace = client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(
+ "workspace_uuid",
+ )
+ assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_list_evaluation_test_cases(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases(
+ "workspace_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = response.parse()
+ assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_list_evaluation_test_cases(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.workspaces.with_streaming_response.list_evaluation_test_cases(
+ "workspace_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = response.parse()
+ assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_list_evaluation_test_cases(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
+ client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases(
+ "",
+ )
+
+
+class TestAsyncWorkspaces:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradientAI) -> None:
+ workspace = await async_client.agents.evaluation_metrics.workspaces.create()
+ assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ workspace = await async_client.agents.evaluation_metrics.workspaces.create(
+ agent_uuids=["string"],
+ description="description",
+ name="name",
+ )
+ assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+ workspace = await async_client.agents.evaluation_metrics.workspaces.retrieve(
+ "workspace_uuid",
+ )
+ assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve(
+ "workspace_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.retrieve(
+ "workspace_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceRetrieveResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.workspaces.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_update(self, async_client: AsyncGradientAI) -> None:
+ workspace = await async_client.agents.evaluation_metrics.workspaces.update(
+ path_workspace_uuid="workspace_uuid",
+ )
+ assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ workspace = await async_client.agents.evaluation_metrics.workspaces.update(
+ path_workspace_uuid="workspace_uuid",
+ description="description",
+ name="name",
+ body_workspace_uuid="workspace_uuid",
+ )
+ assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.update(
+ path_workspace_uuid="workspace_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.update(
+ path_workspace_uuid="workspace_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.workspaces.with_raw_response.update(
+ path_workspace_uuid="",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+ workspace = await async_client.agents.evaluation_metrics.workspaces.list()
+ assert_matches_type(WorkspaceListResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceListResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceListResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradientAI) -> None:
+ workspace = await async_client.agents.evaluation_metrics.workspaces.delete(
+ "workspace_uuid",
+ )
+ assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.delete(
+ "workspace_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.delete(
+ "workspace_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceDeleteResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.workspaces.with_raw_response.delete(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_evaluation_test_cases(self, async_client: AsyncGradientAI) -> None:
+ workspace = await async_client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(
+ "workspace_uuid",
+ )
+ assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_list_evaluation_test_cases(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases(
+ "workspace_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_list_evaluation_test_cases(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.list_evaluation_test_cases(
+ "workspace_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ workspace = await response.parse()
+ assert_matches_type(WorkspaceListEvaluationTestCasesResponse, workspace, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_list_evaluation_test_cases(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.workspaces.with_raw_response.list_evaluation_test_cases(
+ "",
+ )
diff --git a/tests/api_resources/agents/evaluation_metrics/workspaces/__init__.py b/tests/api_resources/agents/evaluation_metrics/workspaces/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/agents/evaluation_metrics/workspaces/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py
new file mode 100644
index 00000000..e772d668
--- /dev/null
+++ b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py
@@ -0,0 +1,239 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from tests.utils import assert_matches_type
+from do_gradientai import GradientAI, AsyncGradientAI
+from do_gradientai.types.agents.evaluation_metrics.workspaces import (
+ AgentListResponse,
+ AgentMoveResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestAgents:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list(self, client: GradientAI) -> None:
+ agent = client.agents.evaluation_metrics.workspaces.agents.list(
+ workspace_uuid="workspace_uuid",
+ )
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list_with_all_params(self, client: GradientAI) -> None:
+ agent = client.agents.evaluation_metrics.workspaces.agents.list(
+ workspace_uuid="workspace_uuid",
+ field_mask={"paths": ["string"]},
+ only_deployed=True,
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_list(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list(
+ workspace_uuid="workspace_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = response.parse()
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_list(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.list(
+ workspace_uuid="workspace_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = response.parse()
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_list(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
+ client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list(
+ workspace_uuid="",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_move(self, client: GradientAI) -> None:
+ agent = client.agents.evaluation_metrics.workspaces.agents.move(
+ path_workspace_uuid="workspace_uuid",
+ )
+ assert_matches_type(AgentMoveResponse, agent, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_move_with_all_params(self, client: GradientAI) -> None:
+ agent = client.agents.evaluation_metrics.workspaces.agents.move(
+ path_workspace_uuid="workspace_uuid",
+ agent_uuids=["string"],
+ body_workspace_uuid="workspace_uuid",
+ )
+ assert_matches_type(AgentMoveResponse, agent, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_move(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move(
+ path_workspace_uuid="workspace_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = response.parse()
+ assert_matches_type(AgentMoveResponse, agent, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_move(self, client: GradientAI) -> None:
+ with client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.move(
+ path_workspace_uuid="workspace_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = response.parse()
+ assert_matches_type(AgentMoveResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_move(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"):
+ client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move(
+ path_workspace_uuid="",
+ )
+
+
+class TestAsyncAgents:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+ agent = await async_client.agents.evaluation_metrics.workspaces.agents.list(
+ workspace_uuid="workspace_uuid",
+ )
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ agent = await async_client.agents.evaluation_metrics.workspaces.agents.list(
+ workspace_uuid="workspace_uuid",
+ field_mask={"paths": ["string"]},
+ only_deployed=True,
+ page=0,
+ per_page=0,
+ )
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list(
+ workspace_uuid="workspace_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = await response.parse()
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.list(
+ workspace_uuid="workspace_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = await response.parse()
+ assert_matches_type(AgentListResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `workspace_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list(
+ workspace_uuid="",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_move(self, async_client: AsyncGradientAI) -> None:
+ agent = await async_client.agents.evaluation_metrics.workspaces.agents.move(
+ path_workspace_uuid="workspace_uuid",
+ )
+ assert_matches_type(AgentMoveResponse, agent, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_move_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ agent = await async_client.agents.evaluation_metrics.workspaces.agents.move(
+ path_workspace_uuid="workspace_uuid",
+ agent_uuids=["string"],
+ body_workspace_uuid="workspace_uuid",
+ )
+ assert_matches_type(AgentMoveResponse, agent, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_move(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move(
+ path_workspace_uuid="workspace_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ agent = await response.parse()
+ assert_matches_type(AgentMoveResponse, agent, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_move(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.move(
+ path_workspace_uuid="workspace_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ agent = await response.parse()
+ assert_matches_type(AgentMoveResponse, agent, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_move(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_workspace_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move(
+ path_workspace_uuid="",
+ )
diff --git a/tests/api_resources/agents/test_evaluation_runs.py b/tests/api_resources/agents/test_evaluation_runs.py
index 721be2a0..6bd3cfa5 100644
--- a/tests/api_resources/agents/test_evaluation_runs.py
+++ b/tests/api_resources/agents/test_evaluation_runs.py
@@ -13,6 +13,7 @@
EvaluationRunCreateResponse,
EvaluationRunRetrieveResponse,
EvaluationRunListResultsResponse,
+ EvaluationRunRetrieveResultsResponse,
)
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -31,7 +32,7 @@ def test_method_create(self, client: GradientAI) -> None:
@parametrize
def test_method_create_with_all_params(self, client: GradientAI) -> None:
evaluation_run = client.agents.evaluation_runs.create(
- agent_uuid="agent_uuid",
+ agent_uuids=["string"],
run_name="run_name",
test_case_uuid="test_case_uuid",
)
@@ -143,6 +144,52 @@ def test_path_params_list_results(self, client: GradientAI) -> None:
"",
)
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_retrieve_results(self, client: GradientAI) -> None:
+ evaluation_run = client.agents.evaluation_runs.retrieve_results(
+ prompt_id=0,
+ evaluation_run_uuid="evaluation_run_uuid",
+ )
+ assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_retrieve_results(self, client: GradientAI) -> None:
+ response = client.agents.evaluation_runs.with_raw_response.retrieve_results(
+ prompt_id=0,
+ evaluation_run_uuid="evaluation_run_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_run = response.parse()
+ assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_retrieve_results(self, client: GradientAI) -> None:
+ with client.agents.evaluation_runs.with_streaming_response.retrieve_results(
+ prompt_id=0,
+ evaluation_run_uuid="evaluation_run_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_run = response.parse()
+ assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_retrieve_results(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
+ client.agents.evaluation_runs.with_raw_response.retrieve_results(
+ prompt_id=0,
+ evaluation_run_uuid="",
+ )
+
class TestAsyncEvaluationRuns:
parametrize = pytest.mark.parametrize(
@@ -159,7 +206,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None:
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None:
evaluation_run = await async_client.agents.evaluation_runs.create(
- agent_uuid="agent_uuid",
+ agent_uuids=["string"],
run_name="run_name",
test_case_uuid="test_case_uuid",
)
@@ -270,3 +317,49 @@ async def test_path_params_list_results(self, async_client: AsyncGradientAI) ->
await async_client.agents.evaluation_runs.with_raw_response.list_results(
"",
)
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_retrieve_results(self, async_client: AsyncGradientAI) -> None:
+ evaluation_run = await async_client.agents.evaluation_runs.retrieve_results(
+ prompt_id=0,
+ evaluation_run_uuid="evaluation_run_uuid",
+ )
+ assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_retrieve_results(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.agents.evaluation_runs.with_raw_response.retrieve_results(
+ prompt_id=0,
+ evaluation_run_uuid="evaluation_run_uuid",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ evaluation_run = await response.parse()
+ assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_retrieve_results(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.agents.evaluation_runs.with_streaming_response.retrieve_results(
+ prompt_id=0,
+ evaluation_run_uuid="evaluation_run_uuid",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ evaluation_run = await response.parse()
+ assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_retrieve_results(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"):
+ await async_client.agents.evaluation_runs.with_raw_response.retrieve_results(
+ prompt_id=0,
+ evaluation_run_uuid="",
+ )
diff --git a/tests/api_resources/agents/test_evaluation_test_cases.py b/tests/api_resources/agents/test_evaluation_test_cases.py
index 50b285bd..87f66b24 100644
--- a/tests/api_resources/agents/test_evaluation_test_cases.py
+++ b/tests/api_resources/agents/test_evaluation_test_cases.py
@@ -72,7 +72,16 @@ def test_streaming_response_create(self, client: GradientAI) -> None:
@parametrize
def test_method_retrieve(self, client: GradientAI) -> None:
evaluation_test_case = client.agents.evaluation_test_cases.retrieve(
- "test_case_uuid",
+ test_case_uuid="test_case_uuid",
+ )
+ assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_retrieve_with_all_params(self, client: GradientAI) -> None:
+ evaluation_test_case = client.agents.evaluation_test_cases.retrieve(
+ test_case_uuid="test_case_uuid",
+ evaluation_test_case_version=0,
)
assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])
@@ -80,7 +89,7 @@ def test_method_retrieve(self, client: GradientAI) -> None:
@parametrize
def test_raw_response_retrieve(self, client: GradientAI) -> None:
response = client.agents.evaluation_test_cases.with_raw_response.retrieve(
- "test_case_uuid",
+ test_case_uuid="test_case_uuid",
)
assert response.is_closed is True
@@ -92,7 +101,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None:
@parametrize
def test_streaming_response_retrieve(self, client: GradientAI) -> None:
with client.agents.evaluation_test_cases.with_streaming_response.retrieve(
- "test_case_uuid",
+ test_case_uuid="test_case_uuid",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -107,7 +116,7 @@ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
def test_path_params_retrieve(self, client: GradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"):
client.agents.evaluation_test_cases.with_raw_response.retrieve(
- "",
+ test_case_uuid="",
)
@pytest.mark.skip()
@@ -306,7 +315,16 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) ->
@parametrize
async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve(
- "test_case_uuid",
+ test_case_uuid="test_case_uuid",
+ )
+ assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_retrieve_with_all_params(self, async_client: AsyncGradientAI) -> None:
+ evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve(
+ test_case_uuid="test_case_uuid",
+ evaluation_test_case_version=0,
)
assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"])
@@ -314,7 +332,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
response = await async_client.agents.evaluation_test_cases.with_raw_response.retrieve(
- "test_case_uuid",
+ test_case_uuid="test_case_uuid",
)
assert response.is_closed is True
@@ -326,7 +344,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
async with async_client.agents.evaluation_test_cases.with_streaming_response.retrieve(
- "test_case_uuid",
+ test_case_uuid="test_case_uuid",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -341,7 +359,7 @@ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI)
async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `test_case_uuid` but received ''"):
await async_client.agents.evaluation_test_cases.with_raw_response.retrieve(
- "",
+ test_case_uuid="",
)
@pytest.mark.skip()
diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py
index f7e21015..e1f3457b 100644
--- a/tests/api_resources/test_models.py
+++ b/tests/api_resources/test_models.py
@@ -9,7 +9,7 @@
from tests.utils import assert_matches_type
from do_gradientai import GradientAI, AsyncGradientAI
-from do_gradientai.types import ModelListResponse
+from do_gradientai.types import Model, ModelListResponse
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -19,19 +19,50 @@ class TestModels:
@pytest.mark.skip()
@parametrize
- def test_method_list(self, client: GradientAI) -> None:
- model = client.models.list()
- assert_matches_type(ModelListResponse, model, path=["response"])
+ def test_method_retrieve(self, client: GradientAI) -> None:
+ model = client.models.retrieve(
+ "llama3-8b-instruct",
+ )
+ assert_matches_type(Model, model, path=["response"])
@pytest.mark.skip()
@parametrize
- def test_method_list_with_all_params(self, client: GradientAI) -> None:
- model = client.models.list(
- page=0,
- per_page=0,
- public_only=True,
- usecases=["MODEL_USECASE_UNKNOWN"],
+ def test_raw_response_retrieve(self, client: GradientAI) -> None:
+ response = client.models.with_raw_response.retrieve(
+ "llama3-8b-instruct",
)
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ model = response.parse()
+ assert_matches_type(Model, model, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_retrieve(self, client: GradientAI) -> None:
+ with client.models.with_streaming_response.retrieve(
+ "llama3-8b-instruct",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ model = response.parse()
+ assert_matches_type(Model, model, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_path_params_retrieve(self, client: GradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"):
+ client.models.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_list(self, client: GradientAI) -> None:
+ model = client.models.list()
assert_matches_type(ModelListResponse, model, path=["response"])
@pytest.mark.skip()
@@ -64,19 +95,50 @@ class TestAsyncModels:
@pytest.mark.skip()
@parametrize
- async def test_method_list(self, async_client: AsyncGradientAI) -> None:
- model = await async_client.models.list()
- assert_matches_type(ModelListResponse, model, path=["response"])
+ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None:
+ model = await async_client.models.retrieve(
+ "llama3-8b-instruct",
+ )
+ assert_matches_type(Model, model, path=["response"])
@pytest.mark.skip()
@parametrize
- async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None:
- model = await async_client.models.list(
- page=0,
- per_page=0,
- public_only=True,
- usecases=["MODEL_USECASE_UNKNOWN"],
+ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ response = await async_client.models.with_raw_response.retrieve(
+ "llama3-8b-instruct",
)
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ model = await response.parse()
+ assert_matches_type(Model, model, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None:
+ async with async_client.models.with_streaming_response.retrieve(
+ "llama3-8b-instruct",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ model = await response.parse()
+ assert_matches_type(Model, model, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"):
+ await async_client.models.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradientAI) -> None:
+ model = await async_client.models.list()
assert_matches_type(ModelListResponse, model, path=["response"])
@pytest.mark.skip()
From 5d38e2eb8604a0a4065d146ba71aa4a5a0e93d85 Mon Sep 17 00:00:00 2001
From: Samuel El-Borai
Date: Thu, 26 Jun 2025 20:17:02 +0200
Subject: [PATCH 18/21] feat: use inference key for chat.completions.create()
---
.../resources/chat/completions.py | 22 +++++++++++++++++--
1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/src/do_gradientai/resources/chat/completions.py b/src/do_gradientai/resources/chat/completions.py
index 2d7c94c3..3dc0343c 100644
--- a/src/do_gradientai/resources/chat/completions.py
+++ b/src/do_gradientai/resources/chat/completions.py
@@ -153,6 +153,15 @@ def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
+
+ # This method requires an inference_key to be set via client argument or environment variable
+ if not hasattr(self._client, "inference_key") or not self._client.inference_key:
+ raise TypeError(
+ "Could not resolve authentication method. Expected the inference_key to be set for chat completions."
+ )
+ headers = extra_headers or {}
+ headers = {"Authorization": f"Bearer {self._client.inference_key}", **headers}
+
return self._post(
"/chat/completions"
if self._client._base_url_overridden
@@ -180,7 +189,7 @@ def create(
completion_create_params.CompletionCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=CompletionCreateResponse,
)
@@ -316,6 +325,15 @@ async def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
+
+ # This method requires an inference_key to be set via client argument or environment variable
+ if not hasattr(self._client, "inference_key") or not self._client.inference_key:
+ raise TypeError(
+ "Could not resolve authentication method. Expected the inference_key to be set for chat completions."
+ )
+ headers = extra_headers or {}
+ headers = {"Authorization": f"Bearer {self._client.inference_key}", **headers}
+
return await self._post(
"/chat/completions"
if self._client._base_url_overridden
@@ -343,7 +361,7 @@ async def create(
completion_create_params.CompletionCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=CompletionCreateResponse,
)
From 4d2b3dcefdefc3830d631c5ac27b58778a299983 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 27 Jun 2025 02:40:08 +0000
Subject: [PATCH 19/21] =?UTF-8?q?fix(ci):=20release-doctor=20=E2=80=94=20r?=
=?UTF-8?q?eport=20correct=20token=20name?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
bin/check-release-environment | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/bin/check-release-environment b/bin/check-release-environment
index b1bd8969..b845b0f4 100644
--- a/bin/check-release-environment
+++ b/bin/check-release-environment
@@ -3,7 +3,7 @@
errors=()
if [ -z "${PYPI_TOKEN}" ]; then
- errors+=("The GRADIENT_AI_PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.")
+ errors+=("The PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.")
fi
lenErrors=${#errors[@]}
From 47fdf385a463ec0fae9c6ad82d76fb1739700c39 Mon Sep 17 00:00:00 2001
From: Sam El-Borai
Date: Fri, 27 Jun 2025 21:10:59 +0200
Subject: [PATCH 20/21] Update src/do_gradientai/resources/chat/completions.py
Co-authored-by: Robert Craigie
---
src/do_gradientai/resources/chat/completions.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/do_gradientai/resources/chat/completions.py b/src/do_gradientai/resources/chat/completions.py
index 3dc0343c..a3a47429 100644
--- a/src/do_gradientai/resources/chat/completions.py
+++ b/src/do_gradientai/resources/chat/completions.py
@@ -155,7 +155,7 @@ def create(
"""
# This method requires an inference_key to be set via client argument or environment variable
- if not hasattr(self._client, "inference_key") or not self._client.inference_key:
+ if not self._client.inference_key:
raise TypeError(
"Could not resolve authentication method. Expected the inference_key to be set for chat completions."
)
From dc5f45a7cd83e7769db1e96c438f9e11761d09c7 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 27 Jun 2025 21:10:11 +0000
Subject: [PATCH 21/21] release: 0.1.0-alpha.5
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 31 +++++++++++++++++++++++++++++++
pyproject.toml | 2 +-
src/do_gradientai/_version.py | 2 +-
4 files changed, 34 insertions(+), 3 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index b56c3d0b..e8285b71 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.1.0-alpha.4"
+ ".": "0.1.0-alpha.5"
}
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index be25824a..18fcce2a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,36 @@
# Changelog
+## 0.1.0-alpha.5 (2025-06-27)
+
+Full Changelog: [v0.1.0-alpha.4...v0.1.0-alpha.5](https://github.com/digitalocean/gradientai-python/compare/v0.1.0-alpha.4...v0.1.0-alpha.5)
+
+### Features
+
+* **api:** define api links and meta as shared models ([8d87001](https://github.com/digitalocean/gradientai-python/commit/8d87001b51de17dd1a36419c0e926cef119f20b8))
+* **api:** update OpenAI spec and add endpoints/models ([e92c54b](https://github.com/digitalocean/gradientai-python/commit/e92c54b05f1025b6173945524724143fdafc7728))
+* **api:** update via SDK Studio ([1ae76f7](https://github.com/digitalocean/gradientai-python/commit/1ae76f78ce9e74f8fd555e3497299127e9aa6889))
+* **api:** update via SDK Studio ([98424f4](https://github.com/digitalocean/gradientai-python/commit/98424f4a2c7e00138fb5eecf94ca72e2ffcc1212))
+* **api:** update via SDK Studio ([299fd1b](https://github.com/digitalocean/gradientai-python/commit/299fd1b29b42f6f2581150e52dcf65fc73270862))
+* **api:** update via SDK Studio ([9a45427](https://github.com/digitalocean/gradientai-python/commit/9a45427678644c34afe9792a2561f394718e64ff))
+* **api:** update via SDK Studio ([abe573f](https://github.com/digitalocean/gradientai-python/commit/abe573fcc2233c7d71f0a925eea8fa9dd4d0fb91))
+* **api:** update via SDK Studio ([e5ce590](https://github.com/digitalocean/gradientai-python/commit/e5ce59057792968892317215078ac2c11e811812))
+* **api:** update via SDK Studio ([1daa3f5](https://github.com/digitalocean/gradientai-python/commit/1daa3f55a49b5411d1b378fce30aea3ccbccb6d7))
+* **api:** update via SDK Studio ([1c702b3](https://github.com/digitalocean/gradientai-python/commit/1c702b340e4fd723393c0f02df2a87d03ca8c9bb))
+* **api:** update via SDK Studio ([891d6b3](https://github.com/digitalocean/gradientai-python/commit/891d6b32e5bdb07d23abf898cec17a60ee64f99d))
+* **api:** update via SDK Studio ([dcbe442](https://github.com/digitalocean/gradientai-python/commit/dcbe442efc67554e60b3b28360a4d9f7dcbb313a))
+* use inference key for chat.completions.create() ([5d38e2e](https://github.com/digitalocean/gradientai-python/commit/5d38e2eb8604a0a4065d146ba71aa4a5a0e93d85))
+
+
+### Bug Fixes
+
+* **ci:** release-doctor — report correct token name ([4d2b3dc](https://github.com/digitalocean/gradientai-python/commit/4d2b3dcefdefc3830d631c5ac27b58778a299983))
+
+
+### Chores
+
+* clean up pyproject ([78637e9](https://github.com/digitalocean/gradientai-python/commit/78637e99816d459c27b4f2fd2f6d79c8d32ecfbe))
+* **internal:** codegen related update ([58d7319](https://github.com/digitalocean/gradientai-python/commit/58d7319ce68c639c2151a3e96a5d522ec06ff96f))
+
## 0.1.0-alpha.4 (2025-06-25)
Full Changelog: [v0.1.0-alpha.3...v0.1.0-alpha.4](https://github.com/digitalocean/gradientai-python/compare/v0.1.0-alpha.3...v0.1.0-alpha.4)
diff --git a/pyproject.toml b/pyproject.toml
index c7b50822..768cf340 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python"
-version = "0.1.0-alpha.4"
+version = "0.1.0-alpha.5"
description = "The official Python library for GradientAI"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/do_gradientai/_version.py b/src/do_gradientai/_version.py
index 83bf8865..12e8e17e 100644
--- a/src/do_gradientai/_version.py
+++ b/src/do_gradientai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "do_gradientai"
-__version__ = "0.1.0-alpha.4" # x-release-please-version
+__version__ = "0.1.0-alpha.5" # x-release-please-version