From 06a03f45c27ba01728543763f4aeb38d6329e543 Mon Sep 17 00:00:00 2001 From: meorphis Date: Thu, 17 Jul 2025 18:59:08 -0400 Subject: [PATCH 01/19] Revert "feat(api): update via SDK Studio" This reverts commit 6cdcc6a36b9dde2117295ee7bcb9a3bc15571779. --- .stats.yml | 2 +- CONTRIBUTING.md | 2 +- README.md | 48 +- api.md | 422 +++++++++--------- mypy.ini | 2 +- pyproject.toml | 8 +- release-please-config.json | 2 +- scripts/lint | 2 +- src/{do_gradientai => gradientai}/__init__.py | 4 +- .../_base_client.py | 2 +- src/{do_gradientai => gradientai}/_client.py | 0 src/{do_gradientai => gradientai}/_compat.py | 0 .../_constants.py | 0 .../_exceptions.py | 0 src/{do_gradientai => gradientai}/_files.py | 0 src/{do_gradientai => gradientai}/_models.py | 0 src/{do_gradientai => gradientai}/_qs.py | 0 .../_resource.py | 0 .../_response.py | 8 +- .../_streaming.py | 0 src/{do_gradientai => gradientai}/_types.py | 2 +- .../_utils/__init__.py | 0 .../_utils/_logs.py | 4 +- .../_utils/_proxy.py | 0 .../_utils/_reflection.py | 0 .../_utils/_resources_proxy.py | 8 +- .../_utils/_streams.py | 0 .../_utils/_sync.py | 0 .../_utils/_transform.py | 0 .../_utils/_typing.py | 0 .../_utils/_utils.py | 0 src/{do_gradientai => gradientai}/_version.py | 0 src/{do_gradientai => gradientai}/py.typed | 0 .../resources/__init__.py | 0 .../resources/account/__init__.py | 0 .../resources/account/account.py | 0 .../resources/account/keys.py | 0 .../resources/agents/__init__.py | 0 .../resources/agents/agents.py | 0 .../resources/agents/api_keys.py | 0 .../resources/agents/chat/__init__.py | 0 .../resources/agents/chat/chat.py | 0 .../resources/agents/chat/completions.py | 0 .../resources/agents/evaluation_datasets.py | 0 .../agents/evaluation_metrics/__init__.py | 0 .../evaluation_metrics/evaluation_metrics.py | 0 .../agents/evaluation_metrics/models.py | 0 .../evaluation_metrics/workspaces/__init__.py | 0 .../evaluation_metrics/workspaces/agents.py | 0 .../workspaces/workspaces.py | 0 
.../resources/agents/evaluation_runs.py | 0 .../resources/agents/evaluation_test_cases.py | 0 .../resources/agents/functions.py | 0 .../resources/agents/knowledge_bases.py | 0 .../resources/agents/routes.py | 0 .../resources/agents/versions.py | 0 .../resources/chat/__init__.py | 0 .../resources/chat/chat.py | 0 .../resources/chat/completions.py | 0 .../resources/droplets/__init__.py | 0 .../resources/droplets/actions.py | 0 .../resources/droplets/autoscale.py | 0 .../resources/droplets/backups.py | 0 .../destroy_with_associated_resources.py | 0 .../resources/droplets/droplets.py | 0 .../resources/firewalls/__init__.py | 0 .../resources/firewalls/droplets.py | 0 .../resources/firewalls/firewalls.py | 0 .../resources/firewalls/rules.py | 0 .../resources/firewalls/tags.py | 0 .../resources/floating_ips/__init__.py | 0 .../resources/floating_ips/actions.py | 0 .../resources/floating_ips/floating_ips.py | 0 .../resources/images/__init__.py | 0 .../resources/images/actions.py | 0 .../resources/images/images.py | 0 .../resources/inference/__init__.py | 0 .../resources/inference/api_keys.py | 0 .../resources/inference/inference.py | 0 .../resources/knowledge_bases/__init__.py | 0 .../resources/knowledge_bases/data_sources.py | 0 .../knowledge_bases/indexing_jobs.py | 0 .../knowledge_bases/knowledge_bases.py | 0 .../resources/load_balancers/__init__.py | 0 .../resources/load_balancers/droplets.py | 0 .../load_balancers/forwarding_rules.py | 0 .../load_balancers/load_balancers.py | 0 .../resources/models/__init__.py | 0 .../resources/models/models.py | 0 .../resources/models/providers/__init__.py | 0 .../resources/models/providers/anthropic.py | 0 .../resources/models/providers/openai.py | 0 .../resources/models/providers/providers.py | 0 .../resources/regions.py | 0 .../resources/sizes.py | 0 .../resources/snapshots.py | 0 .../resources/volumes/__init__.py | 0 .../resources/volumes/actions.py | 0 .../resources/volumes/snapshots.py | 0 .../resources/volumes/volumes.py | 0 
.../types/__init__.py | 0 .../types/account/__init__.py | 0 .../types/account/key_create_params.py | 0 .../types/account/key_create_response.py | 0 .../types/account/key_list_params.py | 0 .../types/account/key_list_response.py | 0 .../types/account/key_retrieve_response.py | 0 .../types/account/key_update_params.py | 0 .../types/account/key_update_response.py | 0 .../types/account_retrieve_response.py | 0 .../types/agent_create_params.py | 0 .../types/agent_create_response.py | 0 .../types/agent_delete_response.py | 0 .../types/agent_list_params.py | 0 .../types/agent_list_response.py | 0 .../types/agent_retrieve_response.py | 0 .../types/agent_update_params.py | 0 .../types/agent_update_response.py | 0 .../types/agent_update_status_params.py | 0 .../types/agent_update_status_response.py | 0 .../types/agents/__init__.py | 0 .../types/agents/api_evaluation_metric.py | 0 .../agents/api_evaluation_metric_result.py | 0 .../types/agents/api_evaluation_prompt.py | 0 .../types/agents/api_evaluation_run.py | 0 .../types/agents/api_evaluation_test_case.py | 0 .../types/agents/api_key_create_params.py | 0 .../types/agents/api_key_create_response.py | 0 .../types/agents/api_key_delete_response.py | 0 .../types/agents/api_key_list_params.py | 0 .../types/agents/api_key_list_response.py | 0 .../agents/api_key_regenerate_response.py | 0 .../types/agents/api_key_update_params.py | 0 .../types/agents/api_key_update_response.py | 0 .../agents/api_link_knowledge_base_output.py | 0 .../types/agents/api_star_metric.py | 0 .../types/agents/api_star_metric_param.py | 0 .../types/agents/chat/__init__.py | 0 .../agents/chat/completion_create_params.py | 0 .../agents/chat/completion_create_response.py | 0 ...reate_file_upload_presigned_urls_params.py | 0 ...ate_file_upload_presigned_urls_response.py | 0 .../evaluation_dataset_create_params.py | 0 .../evaluation_dataset_create_response.py | 0 .../evaluation_metric_list_regions_params.py | 0 ...evaluation_metric_list_regions_response.py | 0 
.../agents/evaluation_metric_list_response.py | 0 .../agents/evaluation_metrics/__init__.py | 0 .../evaluation_metrics/model_list_params.py | 0 .../evaluation_metrics/model_list_response.py | 0 .../workspace_create_params.py | 0 .../workspace_create_response.py | 0 .../workspace_delete_response.py | 0 ...ace_list_evaluation_test_cases_response.py | 0 .../workspace_list_response.py | 0 .../workspace_retrieve_response.py | 0 .../workspace_update_params.py | 0 .../workspace_update_response.py | 0 .../evaluation_metrics/workspaces/__init__.py | 0 .../workspaces/agent_list_params.py | 0 .../workspaces/agent_list_response.py | 0 .../workspaces/agent_move_params.py | 0 .../workspaces/agent_move_response.py | 0 .../agents/evaluation_run_create_params.py | 0 .../agents/evaluation_run_create_response.py | 0 .../evaluation_run_list_results_params.py | 0 .../evaluation_run_list_results_response.py | 0 .../evaluation_run_retrieve_response.py | 0 ...valuation_run_retrieve_results_response.py | 0 .../evaluation_test_case_create_params.py | 0 .../evaluation_test_case_create_response.py | 0 ...n_test_case_list_evaluation_runs_params.py | 0 ...test_case_list_evaluation_runs_response.py | 0 .../evaluation_test_case_list_response.py | 0 .../evaluation_test_case_retrieve_params.py | 0 .../evaluation_test_case_retrieve_response.py | 0 .../evaluation_test_case_update_params.py | 0 .../evaluation_test_case_update_response.py | 0 .../types/agents/function_create_params.py | 0 .../types/agents/function_create_response.py | 0 .../types/agents/function_delete_response.py | 0 .../types/agents/function_update_params.py | 0 .../types/agents/function_update_response.py | 0 .../agents/knowledge_base_detach_response.py | 0 .../types/agents/route_add_params.py | 0 .../types/agents/route_add_response.py | 0 .../types/agents/route_delete_response.py | 0 .../types/agents/route_update_params.py | 0 .../types/agents/route_update_response.py | 0 .../types/agents/route_view_response.py | 0 
.../types/agents/version_list_params.py | 0 .../types/agents/version_list_response.py | 0 .../types/agents/version_update_params.py | 0 .../types/agents/version_update_response.py | 0 .../types/api_agent.py | 0 .../types/api_agent_api_key_info.py | 0 .../types/api_agent_model.py | 0 .../types/api_agreement.py | 0 .../types/api_anthropic_api_key_info.py | 0 .../types/api_deployment_visibility.py | 0 .../types/api_knowledge_base.py | 0 .../types/api_model.py | 0 .../types/api_model_version.py | 0 .../types/api_openai_api_key_info.py | 0 .../types/api_retrieval_method.py | 0 .../types/api_workspace.py | 0 .../types/chat/__init__.py | 0 .../types/chat/completion_create_params.py | 0 .../types/chat/completion_create_response.py | 0 .../types/domains.py | 0 .../types/domains_param.py | 0 .../types/droplet_backup_policy.py | 0 .../types/droplet_backup_policy_param.py | 0 .../types/droplet_create_params.py | 0 .../types/droplet_create_response.py | 0 .../types/droplet_delete_by_tag_params.py | 0 .../types/droplet_list_firewalls_params.py | 0 .../types/droplet_list_firewalls_response.py | 0 .../types/droplet_list_kernels_params.py | 0 .../types/droplet_list_kernels_response.py | 0 .../types/droplet_list_neighbors_response.py | 0 .../types/droplet_list_params.py | 0 .../types/droplet_list_response.py | 0 .../types/droplet_list_snapshots_params.py | 0 .../types/droplet_list_snapshots_response.py | 0 .../types/droplet_retrieve_response.py | 0 .../types/droplets/__init__.py | 0 .../droplets/action_bulk_initiate_params.py | 0 .../droplets/action_bulk_initiate_response.py | 0 .../types/droplets/action_initiate_params.py | 0 .../droplets/action_initiate_response.py | 0 .../types/droplets/action_list_params.py | 0 .../types/droplets/action_list_response.py | 0 .../droplets/action_retrieve_response.py | 0 .../types/droplets/associated_resource.py | 0 .../types/droplets/autoscale_create_params.py | 0 .../droplets/autoscale_create_response.py | 0 
.../droplets/autoscale_list_history_params.py | 0 .../autoscale_list_history_response.py | 0 .../droplets/autoscale_list_members_params.py | 0 .../autoscale_list_members_response.py | 0 .../types/droplets/autoscale_list_params.py | 0 .../types/droplets/autoscale_list_response.py | 0 .../types/droplets/autoscale_pool.py | 0 .../autoscale_pool_droplet_template.py | 0 .../autoscale_pool_droplet_template_param.py | 0 .../droplets/autoscale_pool_dynamic_config.py | 0 .../autoscale_pool_dynamic_config_param.py | 0 .../droplets/autoscale_pool_static_config.py | 0 .../autoscale_pool_static_config_param.py | 0 .../droplets/autoscale_retrieve_response.py | 0 .../types/droplets/autoscale_update_params.py | 0 .../droplets/autoscale_update_response.py | 0 .../types/droplets/backup_list_params.py | 0 .../droplets/backup_list_policies_params.py | 0 .../droplets/backup_list_policies_response.py | 0 .../types/droplets/backup_list_response.py | 0 ...backup_list_supported_policies_response.py | 0 .../backup_retrieve_policy_response.py | 0 .../types/droplets/current_utilization.py | 0 ...sociated_resource_check_status_response.py | 0 ...ciated_resource_delete_selective_params.py | 0 ..._with_associated_resource_list_response.py | 0 .../droplets/destroyed_associated_resource.py | 0 .../types/firewall.py | 0 .../types/firewall_create_params.py | 0 .../types/firewall_create_response.py | 0 .../types/firewall_list_params.py | 0 .../types/firewall_list_response.py | 0 .../types/firewall_param.py | 0 .../types/firewall_retrieve_response.py | 0 .../types/firewall_update_params.py | 0 .../types/firewall_update_response.py | 0 .../types/firewalls/__init__.py | 0 .../types/firewalls/droplet_add_params.py | 0 .../types/firewalls/droplet_remove_params.py | 0 .../types/firewalls/rule_add_params.py | 0 .../types/firewalls/rule_remove_params.py | 0 .../types/firewalls/tag_add_params.py | 0 .../types/firewalls/tag_remove_params.py | 0 .../types/floating_ip.py | 0 
.../types/floating_ip_create_params.py | 0 .../types/floating_ip_create_response.py | 0 .../types/floating_ip_list_params.py | 0 .../types/floating_ip_list_response.py | 0 .../types/floating_ip_retrieve_response.py | 0 .../types/floating_ips/__init__.py | 0 .../floating_ips/action_create_params.py | 0 .../floating_ips/action_create_response.py | 0 .../floating_ips/action_list_response.py | 0 .../floating_ips/action_retrieve_response.py | 0 .../types/forwarding_rule.py | 0 .../types/forwarding_rule_param.py | 0 .../types/glb_settings.py | 0 .../types/glb_settings_param.py | 0 .../types/health_check.py | 0 .../types/health_check_param.py | 0 .../types/image_create_params.py | 0 .../types/image_create_response.py | 0 .../types/image_list_params.py | 0 .../types/image_list_response.py | 0 .../types/image_retrieve_response.py | 0 .../types/image_update_params.py | 0 .../types/image_update_response.py | 0 .../types/images/__init__.py | 0 .../types/images/action_create_params.py | 0 .../types/images/action_list_response.py | 0 .../types/inference/__init__.py | 0 .../types/inference/api_key_create_params.py | 0 .../inference/api_key_create_response.py | 0 .../inference/api_key_delete_response.py | 0 .../types/inference/api_key_list_params.py | 0 .../types/inference/api_key_list_response.py | 0 .../types/inference/api_key_update_params.py | 0 .../api_key_update_regenerate_response.py | 0 .../inference/api_key_update_response.py | 0 .../types/inference/api_model_api_key_info.py | 0 .../types/knowledge_base_create_params.py | 0 .../types/knowledge_base_create_response.py | 0 .../types/knowledge_base_delete_response.py | 0 .../types/knowledge_base_list_params.py | 0 .../types/knowledge_base_list_response.py | 0 .../types/knowledge_base_retrieve_response.py | 0 .../types/knowledge_base_update_params.py | 0 .../types/knowledge_base_update_response.py | 0 .../types/knowledge_bases/__init__.py | 0 .../api_file_upload_data_source.py | 0 .../api_file_upload_data_source_param.py | 0 
.../api_indexed_data_source.py | 0 .../types/knowledge_bases/api_indexing_job.py | 0 .../api_knowledge_base_data_source.py | 0 .../knowledge_bases/api_spaces_data_source.py | 0 .../api_spaces_data_source_param.py | 0 .../api_web_crawler_data_source.py | 0 .../api_web_crawler_data_source_param.py | 0 .../knowledge_bases/aws_data_source_param.py | 0 .../data_source_create_params.py | 0 .../data_source_create_response.py | 0 .../data_source_delete_response.py | 0 .../data_source_list_params.py | 0 .../data_source_list_response.py | 0 .../indexing_job_create_params.py | 0 .../indexing_job_create_response.py | 0 .../indexing_job_list_params.py | 0 .../indexing_job_list_response.py | 0 ...xing_job_retrieve_data_sources_response.py | 0 .../indexing_job_retrieve_response.py | 0 .../indexing_job_update_cancel_params.py | 0 .../indexing_job_update_cancel_response.py | 0 .../types/lb_firewall.py | 0 .../types/lb_firewall_param.py | 0 .../types/load_balancer.py | 0 .../types/load_balancer_create_params.py | 0 .../types/load_balancer_create_response.py | 0 .../types/load_balancer_list_params.py | 0 .../types/load_balancer_list_response.py | 0 .../types/load_balancer_retrieve_response.py | 0 .../types/load_balancer_update_params.py | 0 .../types/load_balancer_update_response.py | 0 .../types/load_balancers/__init__.py | 0 .../load_balancers/droplet_add_params.py | 0 .../load_balancers/droplet_remove_params.py | 0 .../forwarding_rule_add_params.py | 0 .../forwarding_rule_remove_params.py | 0 .../types/model_list_response.py | 0 .../types/model_retrieve_response.py | 0 .../types/models/__init__.py | 0 .../types/models/providers/__init__.py | 0 .../providers/anthropic_create_params.py | 0 .../providers/anthropic_create_response.py | 0 .../providers/anthropic_delete_response.py | 0 .../providers/anthropic_list_agents_params.py | 0 .../anthropic_list_agents_response.py | 0 .../models/providers/anthropic_list_params.py | 0 .../providers/anthropic_list_response.py | 0 
.../providers/anthropic_retrieve_response.py | 0 .../providers/anthropic_update_params.py | 0 .../providers/anthropic_update_response.py | 0 .../models/providers/openai_create_params.py | 0 .../providers/openai_create_response.py | 0 .../providers/openai_delete_response.py | 0 .../models/providers/openai_list_params.py | 0 .../models/providers/openai_list_response.py | 0 .../openai_retrieve_agents_params.py | 0 .../openai_retrieve_agents_response.py | 0 .../providers/openai_retrieve_response.py | 0 .../models/providers/openai_update_params.py | 0 .../providers/openai_update_response.py | 0 .../types/region_list_params.py | 0 .../types/region_list_response.py | 0 .../types/shared/__init__.py | 0 .../types/shared/action.py | 0 .../types/shared/action_link.py | 0 .../types/shared/api_links.py | 0 .../types/shared/api_meta.py | 0 .../types/shared/backward_links.py | 0 .../types/shared/chat_completion_chunk.py | 0 .../shared/chat_completion_token_logprob.py | 0 .../types/shared/completion_usage.py | 0 .../types/shared/disk_info.py | 0 .../types/shared/droplet.py | 0 .../shared/droplet_next_backup_window.py | 0 .../types/shared/firewall_rule_target.py | 0 .../types/shared/forward_links.py | 0 .../types/shared/garbage_collection.py | 0 .../types/shared/gpu_info.py | 0 .../types/shared/image.py | 0 .../types/shared/kernel.py | 0 .../types/shared/meta_properties.py | 0 .../types/shared/network_v4.py | 0 .../types/shared/network_v6.py | 0 .../types/shared/page_links.py | 0 .../types/shared/region.py | 0 .../types/shared/repository_blob.py | 0 .../types/shared/repository_manifest.py | 0 .../types/shared/repository_tag.py | 0 .../types/shared/size.py | 0 .../types/shared/snapshots.py | 0 .../types/shared/subscription.py | 0 .../types/shared/subscription_tier_base.py | 0 .../types/shared/vpc_peering.py | 0 .../types/shared_params/__init__.py | 0 .../shared_params/firewall_rule_target.py | 0 .../types/size_list_params.py | 0 .../types/size_list_response.py | 0 
.../types/snapshot_list_params.py | 0 .../types/snapshot_list_response.py | 0 .../types/snapshot_retrieve_response.py | 0 .../types/sticky_sessions.py | 0 .../types/sticky_sessions_param.py | 0 .../types/volume_create_params.py | 0 .../types/volume_create_response.py | 0 .../types/volume_delete_by_name_params.py | 0 .../types/volume_list_params.py | 0 .../types/volume_list_response.py | 0 .../types/volume_retrieve_response.py | 0 .../types/volumes/__init__.py | 0 .../volumes/action_initiate_by_id_params.py | 0 .../volumes/action_initiate_by_id_response.py | 0 .../volumes/action_initiate_by_name_params.py | 0 .../action_initiate_by_name_response.py | 0 .../types/volumes/action_list_params.py | 0 .../types/volumes/action_list_response.py | 0 .../types/volumes/action_retrieve_params.py | 0 .../types/volumes/action_retrieve_response.py | 0 .../types/volumes/snapshot_create_params.py | 0 .../types/volumes/snapshot_create_response.py | 0 .../types/volumes/snapshot_list_params.py | 0 .../types/volumes/snapshot_list_response.py | 0 .../volumes/snapshot_retrieve_response.py | 0 .../types/volumes/volume_action.py | 0 tests/api_resources/account/test_keys.py | 4 +- .../agents/chat/test_completions.py | 4 +- .../agents/evaluation_metrics/test_models.py | 4 +- .../evaluation_metrics/test_workspaces.py | 4 +- .../workspaces/test_agents.py | 4 +- tests/api_resources/agents/test_api_keys.py | 4 +- .../agents/test_evaluation_datasets.py | 4 +- .../agents/test_evaluation_metrics.py | 4 +- .../agents/test_evaluation_runs.py | 4 +- .../agents/test_evaluation_test_cases.py | 4 +- tests/api_resources/agents/test_functions.py | 4 +- .../agents/test_knowledge_bases.py | 4 +- tests/api_resources/agents/test_routes.py | 4 +- tests/api_resources/agents/test_versions.py | 4 +- tests/api_resources/chat/test_completions.py | 4 +- tests/api_resources/droplets/test_actions.py | 4 +- .../api_resources/droplets/test_autoscale.py | 4 +- tests/api_resources/droplets/test_backups.py | 4 +- 
.../test_destroy_with_associated_resources.py | 4 +- .../api_resources/firewalls/test_droplets.py | 2 +- tests/api_resources/firewalls/test_rules.py | 2 +- tests/api_resources/firewalls/test_tags.py | 2 +- .../floating_ips/test_actions.py | 4 +- tests/api_resources/images/test_actions.py | 6 +- .../api_resources/inference/test_api_keys.py | 4 +- .../knowledge_bases/test_data_sources.py | 4 +- .../knowledge_bases/test_indexing_jobs.py | 4 +- .../load_balancers/test_droplets.py | 2 +- .../load_balancers/test_forwarding_rules.py | 2 +- .../models/providers/test_anthropic.py | 4 +- .../models/providers/test_openai.py | 4 +- tests/api_resources/test_account.py | 4 +- tests/api_resources/test_agents.py | 4 +- tests/api_resources/test_droplets.py | 4 +- tests/api_resources/test_firewalls.py | 4 +- tests/api_resources/test_floating_ips.py | 4 +- tests/api_resources/test_images.py | 4 +- tests/api_resources/test_knowledge_bases.py | 4 +- tests/api_resources/test_load_balancers.py | 4 +- tests/api_resources/test_models.py | 4 +- tests/api_resources/test_regions.py | 4 +- tests/api_resources/test_sizes.py | 4 +- tests/api_resources/test_snapshots.py | 4 +- tests/api_resources/test_volumes.py | 4 +- tests/api_resources/volumes/test_actions.py | 4 +- tests/api_resources/volumes/test_snapshots.py | 4 +- tests/conftest.py | 6 +- tests/test_client.py | 48 +- tests/test_deepcopy.py | 2 +- tests/test_extract_files.py | 4 +- tests/test_files.py | 2 +- tests/test_models.py | 6 +- tests/test_qs.py | 2 +- tests/test_required_args.py | 2 +- tests/test_response.py | 14 +- tests/test_streaming.py | 4 +- tests/test_transform.py | 8 +- tests/test_utils/test_proxy.py | 2 +- tests/test_utils/test_typing.py | 2 +- tests/utils.py | 8 +- 511 files changed, 401 insertions(+), 401 deletions(-) rename src/{do_gradientai => gradientai}/__init__.py (95%) rename src/{do_gradientai => gradientai}/_base_client.py (99%) rename src/{do_gradientai => gradientai}/_client.py (100%) rename src/{do_gradientai 
=> gradientai}/_compat.py (100%) rename src/{do_gradientai => gradientai}/_constants.py (100%) rename src/{do_gradientai => gradientai}/_exceptions.py (100%) rename src/{do_gradientai => gradientai}/_files.py (100%) rename src/{do_gradientai => gradientai}/_models.py (100%) rename src/{do_gradientai => gradientai}/_qs.py (100%) rename src/{do_gradientai => gradientai}/_resource.py (100%) rename src/{do_gradientai => gradientai}/_response.py (99%) rename src/{do_gradientai => gradientai}/_streaming.py (100%) rename src/{do_gradientai => gradientai}/_types.py (99%) rename src/{do_gradientai => gradientai}/_utils/__init__.py (100%) rename src/{do_gradientai => gradientai}/_utils/_logs.py (75%) rename src/{do_gradientai => gradientai}/_utils/_proxy.py (100%) rename src/{do_gradientai => gradientai}/_utils/_reflection.py (100%) rename src/{do_gradientai => gradientai}/_utils/_resources_proxy.py (50%) rename src/{do_gradientai => gradientai}/_utils/_streams.py (100%) rename src/{do_gradientai => gradientai}/_utils/_sync.py (100%) rename src/{do_gradientai => gradientai}/_utils/_transform.py (100%) rename src/{do_gradientai => gradientai}/_utils/_typing.py (100%) rename src/{do_gradientai => gradientai}/_utils/_utils.py (100%) rename src/{do_gradientai => gradientai}/_version.py (100%) rename src/{do_gradientai => gradientai}/py.typed (100%) rename src/{do_gradientai => gradientai}/resources/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/account/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/account/account.py (100%) rename src/{do_gradientai => gradientai}/resources/account/keys.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/agents.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/api_keys.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/chat/__init__.py (100%) rename src/{do_gradientai => 
gradientai}/resources/agents/chat/chat.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/chat/completions.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_datasets.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_metrics/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_metrics/evaluation_metrics.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_metrics/models.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_metrics/workspaces/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_metrics/workspaces/agents.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_metrics/workspaces/workspaces.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_runs.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/evaluation_test_cases.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/functions.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/knowledge_bases.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/routes.py (100%) rename src/{do_gradientai => gradientai}/resources/agents/versions.py (100%) rename src/{do_gradientai => gradientai}/resources/chat/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/chat/chat.py (100%) rename src/{do_gradientai => gradientai}/resources/chat/completions.py (100%) rename src/{do_gradientai => gradientai}/resources/droplets/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/droplets/actions.py (100%) rename src/{do_gradientai => gradientai}/resources/droplets/autoscale.py (100%) rename src/{do_gradientai => gradientai}/resources/droplets/backups.py (100%) rename src/{do_gradientai => gradientai}/resources/droplets/destroy_with_associated_resources.py (100%) rename src/{do_gradientai => 
gradientai}/resources/droplets/droplets.py (100%) rename src/{do_gradientai => gradientai}/resources/firewalls/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/firewalls/droplets.py (100%) rename src/{do_gradientai => gradientai}/resources/firewalls/firewalls.py (100%) rename src/{do_gradientai => gradientai}/resources/firewalls/rules.py (100%) rename src/{do_gradientai => gradientai}/resources/firewalls/tags.py (100%) rename src/{do_gradientai => gradientai}/resources/floating_ips/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/floating_ips/actions.py (100%) rename src/{do_gradientai => gradientai}/resources/floating_ips/floating_ips.py (100%) rename src/{do_gradientai => gradientai}/resources/images/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/images/actions.py (100%) rename src/{do_gradientai => gradientai}/resources/images/images.py (100%) rename src/{do_gradientai => gradientai}/resources/inference/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/inference/api_keys.py (100%) rename src/{do_gradientai => gradientai}/resources/inference/inference.py (100%) rename src/{do_gradientai => gradientai}/resources/knowledge_bases/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/knowledge_bases/data_sources.py (100%) rename src/{do_gradientai => gradientai}/resources/knowledge_bases/indexing_jobs.py (100%) rename src/{do_gradientai => gradientai}/resources/knowledge_bases/knowledge_bases.py (100%) rename src/{do_gradientai => gradientai}/resources/load_balancers/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/load_balancers/droplets.py (100%) rename src/{do_gradientai => gradientai}/resources/load_balancers/forwarding_rules.py (100%) rename src/{do_gradientai => gradientai}/resources/load_balancers/load_balancers.py (100%) rename src/{do_gradientai => gradientai}/resources/models/__init__.py (100%) rename src/{do_gradientai => 
gradientai}/resources/models/models.py (100%) rename src/{do_gradientai => gradientai}/resources/models/providers/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/models/providers/anthropic.py (100%) rename src/{do_gradientai => gradientai}/resources/models/providers/openai.py (100%) rename src/{do_gradientai => gradientai}/resources/models/providers/providers.py (100%) rename src/{do_gradientai => gradientai}/resources/regions.py (100%) rename src/{do_gradientai => gradientai}/resources/sizes.py (100%) rename src/{do_gradientai => gradientai}/resources/snapshots.py (100%) rename src/{do_gradientai => gradientai}/resources/volumes/__init__.py (100%) rename src/{do_gradientai => gradientai}/resources/volumes/actions.py (100%) rename src/{do_gradientai => gradientai}/resources/volumes/snapshots.py (100%) rename src/{do_gradientai => gradientai}/resources/volumes/volumes.py (100%) rename src/{do_gradientai => gradientai}/types/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/account/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/account/key_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/account/key_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/account/key_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/account/key_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/account/key_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/account/key_update_params.py (100%) rename src/{do_gradientai => gradientai}/types/account/key_update_response.py (100%) rename src/{do_gradientai => gradientai}/types/account_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/agent_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/agent_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/agent_delete_response.py (100%) rename src/{do_gradientai => 
gradientai}/types/agent_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/agent_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/agent_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/agent_update_params.py (100%) rename src/{do_gradientai => gradientai}/types/agent_update_response.py (100%) rename src/{do_gradientai => gradientai}/types/agent_update_status_params.py (100%) rename src/{do_gradientai => gradientai}/types/agent_update_status_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/agents/api_evaluation_metric.py (100%) rename src/{do_gradientai => gradientai}/types/agents/api_evaluation_metric_result.py (100%) rename src/{do_gradientai => gradientai}/types/agents/api_evaluation_prompt.py (100%) rename src/{do_gradientai => gradientai}/types/agents/api_evaluation_run.py (100%) rename src/{do_gradientai => gradientai}/types/agents/api_evaluation_test_case.py (100%) rename src/{do_gradientai => gradientai}/types/agents/api_key_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/api_key_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/api_key_delete_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/api_key_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/api_key_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/api_key_regenerate_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/api_key_update_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/api_key_update_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/api_link_knowledge_base_output.py (100%) rename src/{do_gradientai => gradientai}/types/agents/api_star_metric.py (100%) rename src/{do_gradientai => gradientai}/types/agents/api_star_metric_param.py (100%) 
rename src/{do_gradientai => gradientai}/types/agents/chat/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/agents/chat/completion_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/chat/completion_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_dataset_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_dataset_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metric_list_regions_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metric_list_regions_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metric_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/model_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/model_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_delete_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_retrieve_response.py (100%) rename 
src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_update_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspace_update_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspaces/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspaces/agent_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspaces/agent_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspaces/agent_move_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_metrics/workspaces/agent_move_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_run_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_run_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_run_list_results_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_run_list_results_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_run_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_run_retrieve_results_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_list_evaluation_runs_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_list_evaluation_runs_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_retrieve_params.py (100%) rename 
src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_update_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/evaluation_test_case_update_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/function_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/function_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/function_delete_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/function_update_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/function_update_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/knowledge_base_detach_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/route_add_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/route_add_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/route_delete_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/route_update_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/route_update_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/route_view_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/version_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/version_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/agents/version_update_params.py (100%) rename src/{do_gradientai => gradientai}/types/agents/version_update_response.py (100%) rename src/{do_gradientai => gradientai}/types/api_agent.py (100%) rename src/{do_gradientai => gradientai}/types/api_agent_api_key_info.py (100%) rename src/{do_gradientai => gradientai}/types/api_agent_model.py (100%) rename src/{do_gradientai => gradientai}/types/api_agreement.py (100%) rename src/{do_gradientai => 
gradientai}/types/api_anthropic_api_key_info.py (100%) rename src/{do_gradientai => gradientai}/types/api_deployment_visibility.py (100%) rename src/{do_gradientai => gradientai}/types/api_knowledge_base.py (100%) rename src/{do_gradientai => gradientai}/types/api_model.py (100%) rename src/{do_gradientai => gradientai}/types/api_model_version.py (100%) rename src/{do_gradientai => gradientai}/types/api_openai_api_key_info.py (100%) rename src/{do_gradientai => gradientai}/types/api_retrieval_method.py (100%) rename src/{do_gradientai => gradientai}/types/api_workspace.py (100%) rename src/{do_gradientai => gradientai}/types/chat/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/chat/completion_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/chat/completion_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/domains.py (100%) rename src/{do_gradientai => gradientai}/types/domains_param.py (100%) rename src/{do_gradientai => gradientai}/types/droplet_backup_policy.py (100%) rename src/{do_gradientai => gradientai}/types/droplet_backup_policy_param.py (100%) rename src/{do_gradientai => gradientai}/types/droplet_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/droplet_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplet_delete_by_tag_params.py (100%) rename src/{do_gradientai => gradientai}/types/droplet_list_firewalls_params.py (100%) rename src/{do_gradientai => gradientai}/types/droplet_list_firewalls_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplet_list_kernels_params.py (100%) rename src/{do_gradientai => gradientai}/types/droplet_list_kernels_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplet_list_neighbors_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplet_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/droplet_list_response.py (100%) rename src/{do_gradientai 
=> gradientai}/types/droplet_list_snapshots_params.py (100%) rename src/{do_gradientai => gradientai}/types/droplet_list_snapshots_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplet_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/action_bulk_initiate_params.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/action_bulk_initiate_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/action_initiate_params.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/action_initiate_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/action_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/action_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/action_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/associated_resource.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/autoscale_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/autoscale_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/autoscale_list_history_params.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/autoscale_list_history_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/autoscale_list_members_params.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/autoscale_list_members_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/autoscale_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/autoscale_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/autoscale_pool.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/autoscale_pool_droplet_template.py (100%) rename src/{do_gradientai => 
gradientai}/types/droplets/autoscale_pool_droplet_template_param.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/autoscale_pool_dynamic_config.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/autoscale_pool_dynamic_config_param.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/autoscale_pool_static_config.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/autoscale_pool_static_config_param.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/autoscale_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/autoscale_update_params.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/autoscale_update_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/backup_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/backup_list_policies_params.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/backup_list_policies_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/backup_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/backup_list_supported_policies_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/backup_retrieve_policy_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/current_utilization.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/destroy_with_associated_resource_check_status_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/destroy_with_associated_resource_delete_selective_params.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/destroy_with_associated_resource_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/droplets/destroyed_associated_resource.py (100%) rename src/{do_gradientai => gradientai}/types/firewall.py (100%) rename src/{do_gradientai => gradientai}/types/firewall_create_params.py (100%) 
rename src/{do_gradientai => gradientai}/types/firewall_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/firewall_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/firewall_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/firewall_param.py (100%) rename src/{do_gradientai => gradientai}/types/firewall_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/firewall_update_params.py (100%) rename src/{do_gradientai => gradientai}/types/firewall_update_response.py (100%) rename src/{do_gradientai => gradientai}/types/firewalls/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/firewalls/droplet_add_params.py (100%) rename src/{do_gradientai => gradientai}/types/firewalls/droplet_remove_params.py (100%) rename src/{do_gradientai => gradientai}/types/firewalls/rule_add_params.py (100%) rename src/{do_gradientai => gradientai}/types/firewalls/rule_remove_params.py (100%) rename src/{do_gradientai => gradientai}/types/firewalls/tag_add_params.py (100%) rename src/{do_gradientai => gradientai}/types/firewalls/tag_remove_params.py (100%) rename src/{do_gradientai => gradientai}/types/floating_ip.py (100%) rename src/{do_gradientai => gradientai}/types/floating_ip_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/floating_ip_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/floating_ip_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/floating_ip_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/floating_ip_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/floating_ips/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/floating_ips/action_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/floating_ips/action_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/floating_ips/action_list_response.py (100%) rename 
src/{do_gradientai => gradientai}/types/floating_ips/action_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/forwarding_rule.py (100%) rename src/{do_gradientai => gradientai}/types/forwarding_rule_param.py (100%) rename src/{do_gradientai => gradientai}/types/glb_settings.py (100%) rename src/{do_gradientai => gradientai}/types/glb_settings_param.py (100%) rename src/{do_gradientai => gradientai}/types/health_check.py (100%) rename src/{do_gradientai => gradientai}/types/health_check_param.py (100%) rename src/{do_gradientai => gradientai}/types/image_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/image_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/image_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/image_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/image_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/image_update_params.py (100%) rename src/{do_gradientai => gradientai}/types/image_update_response.py (100%) rename src/{do_gradientai => gradientai}/types/images/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/images/action_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/images/action_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/inference/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/inference/api_key_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/inference/api_key_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/inference/api_key_delete_response.py (100%) rename src/{do_gradientai => gradientai}/types/inference/api_key_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/inference/api_key_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/inference/api_key_update_params.py (100%) rename src/{do_gradientai => 
gradientai}/types/inference/api_key_update_regenerate_response.py (100%) rename src/{do_gradientai => gradientai}/types/inference/api_key_update_response.py (100%) rename src/{do_gradientai => gradientai}/types/inference/api_model_api_key_info.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_base_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_base_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_base_delete_response.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_base_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_base_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_base_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_base_update_params.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_base_update_response.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_file_upload_data_source.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_file_upload_data_source_param.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_indexed_data_source.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_indexing_job.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_knowledge_base_data_source.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_spaces_data_source.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_spaces_data_source_param.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_web_crawler_data_source.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/api_web_crawler_data_source_param.py (100%) rename src/{do_gradientai => 
gradientai}/types/knowledge_bases/aws_data_source_param.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/data_source_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/data_source_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/data_source_delete_response.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/data_source_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/data_source_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_update_cancel_params.py (100%) rename src/{do_gradientai => gradientai}/types/knowledge_bases/indexing_job_update_cancel_response.py (100%) rename src/{do_gradientai => gradientai}/types/lb_firewall.py (100%) rename src/{do_gradientai => gradientai}/types/lb_firewall_param.py (100%) rename src/{do_gradientai => gradientai}/types/load_balancer.py (100%) rename src/{do_gradientai => gradientai}/types/load_balancer_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/load_balancer_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/load_balancer_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/load_balancer_list_response.py (100%) rename src/{do_gradientai => 
gradientai}/types/load_balancer_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/load_balancer_update_params.py (100%) rename src/{do_gradientai => gradientai}/types/load_balancer_update_response.py (100%) rename src/{do_gradientai => gradientai}/types/load_balancers/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/load_balancers/droplet_add_params.py (100%) rename src/{do_gradientai => gradientai}/types/load_balancers/droplet_remove_params.py (100%) rename src/{do_gradientai => gradientai}/types/load_balancers/forwarding_rule_add_params.py (100%) rename src/{do_gradientai => gradientai}/types/load_balancers/forwarding_rule_remove_params.py (100%) rename src/{do_gradientai => gradientai}/types/model_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/model_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/models/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_delete_response.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_list_agents_params.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_list_agents_response.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/anthropic_update_params.py (100%) rename src/{do_gradientai => 
gradientai}/types/models/providers/anthropic_update_response.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/openai_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/openai_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/openai_delete_response.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/openai_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/openai_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/openai_retrieve_agents_params.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/openai_retrieve_agents_response.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/openai_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/openai_update_params.py (100%) rename src/{do_gradientai => gradientai}/types/models/providers/openai_update_response.py (100%) rename src/{do_gradientai => gradientai}/types/region_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/region_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/shared/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/shared/action.py (100%) rename src/{do_gradientai => gradientai}/types/shared/action_link.py (100%) rename src/{do_gradientai => gradientai}/types/shared/api_links.py (100%) rename src/{do_gradientai => gradientai}/types/shared/api_meta.py (100%) rename src/{do_gradientai => gradientai}/types/shared/backward_links.py (100%) rename src/{do_gradientai => gradientai}/types/shared/chat_completion_chunk.py (100%) rename src/{do_gradientai => gradientai}/types/shared/chat_completion_token_logprob.py (100%) rename src/{do_gradientai => gradientai}/types/shared/completion_usage.py (100%) rename src/{do_gradientai => gradientai}/types/shared/disk_info.py (100%) rename 
src/{do_gradientai => gradientai}/types/shared/droplet.py (100%) rename src/{do_gradientai => gradientai}/types/shared/droplet_next_backup_window.py (100%) rename src/{do_gradientai => gradientai}/types/shared/firewall_rule_target.py (100%) rename src/{do_gradientai => gradientai}/types/shared/forward_links.py (100%) rename src/{do_gradientai => gradientai}/types/shared/garbage_collection.py (100%) rename src/{do_gradientai => gradientai}/types/shared/gpu_info.py (100%) rename src/{do_gradientai => gradientai}/types/shared/image.py (100%) rename src/{do_gradientai => gradientai}/types/shared/kernel.py (100%) rename src/{do_gradientai => gradientai}/types/shared/meta_properties.py (100%) rename src/{do_gradientai => gradientai}/types/shared/network_v4.py (100%) rename src/{do_gradientai => gradientai}/types/shared/network_v6.py (100%) rename src/{do_gradientai => gradientai}/types/shared/page_links.py (100%) rename src/{do_gradientai => gradientai}/types/shared/region.py (100%) rename src/{do_gradientai => gradientai}/types/shared/repository_blob.py (100%) rename src/{do_gradientai => gradientai}/types/shared/repository_manifest.py (100%) rename src/{do_gradientai => gradientai}/types/shared/repository_tag.py (100%) rename src/{do_gradientai => gradientai}/types/shared/size.py (100%) rename src/{do_gradientai => gradientai}/types/shared/snapshots.py (100%) rename src/{do_gradientai => gradientai}/types/shared/subscription.py (100%) rename src/{do_gradientai => gradientai}/types/shared/subscription_tier_base.py (100%) rename src/{do_gradientai => gradientai}/types/shared/vpc_peering.py (100%) rename src/{do_gradientai => gradientai}/types/shared_params/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/shared_params/firewall_rule_target.py (100%) rename src/{do_gradientai => gradientai}/types/size_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/size_list_response.py (100%) rename src/{do_gradientai => 
gradientai}/types/snapshot_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/snapshot_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/snapshot_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/sticky_sessions.py (100%) rename src/{do_gradientai => gradientai}/types/sticky_sessions_param.py (100%) rename src/{do_gradientai => gradientai}/types/volume_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/volume_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/volume_delete_by_name_params.py (100%) rename src/{do_gradientai => gradientai}/types/volume_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/volume_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/volume_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/volumes/__init__.py (100%) rename src/{do_gradientai => gradientai}/types/volumes/action_initiate_by_id_params.py (100%) rename src/{do_gradientai => gradientai}/types/volumes/action_initiate_by_id_response.py (100%) rename src/{do_gradientai => gradientai}/types/volumes/action_initiate_by_name_params.py (100%) rename src/{do_gradientai => gradientai}/types/volumes/action_initiate_by_name_response.py (100%) rename src/{do_gradientai => gradientai}/types/volumes/action_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/volumes/action_list_response.py (100%) rename src/{do_gradientai => gradientai}/types/volumes/action_retrieve_params.py (100%) rename src/{do_gradientai => gradientai}/types/volumes/action_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/volumes/snapshot_create_params.py (100%) rename src/{do_gradientai => gradientai}/types/volumes/snapshot_create_response.py (100%) rename src/{do_gradientai => gradientai}/types/volumes/snapshot_list_params.py (100%) rename src/{do_gradientai => gradientai}/types/volumes/snapshot_list_response.py 
(100%) rename src/{do_gradientai => gradientai}/types/volumes/snapshot_retrieve_response.py (100%) rename src/{do_gradientai => gradientai}/types/volumes/volume_action.py (100%) diff --git a/.stats.yml b/.stats.yml index c0860901..4aec10aa 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 169 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-f8e8c290636c1e218efcf7bfe92ba7570c11690754d21287d838919fbc943a80.yml openapi_spec_hash: 1eddf488ecbe415efb45445697716f5d -config_hash: c59a2f17744fc2b7a8248ec916b8aa70 +config_hash: 39b04f6247d3dc8917c3adab078ec8c4 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4f59c83a..086907ef 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -36,7 +36,7 @@ $ pip install -r requirements-dev.lock Most of the SDK is generated code. Modifications to code will be persisted between generations, but may result in merge conflicts between manual patches and changes from the generator. The generator will never -modify the contents of the `src/do_gradientai/lib/` and `examples/` directories. +modify the contents of the `src/gradientai/lib/` and `examples/` directories. ## Adding and running examples diff --git a/README.md b/README.md index c2cb97ad..2c739c6d 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ The full API of this library can be found in [api.md](api.md). 
```python import os -from do_gradientai import GradientAI +from gradientai import GradientAI api_client = GradientAI( api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted @@ -66,7 +66,7 @@ Simply import `AsyncGradientAI` instead of `GradientAI` and use `await` with eac ```python import os import asyncio -from do_gradientai import AsyncGradientAI +from gradientai import AsyncGradientAI client = AsyncGradientAI( api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted @@ -106,8 +106,8 @@ Then you can enable it by instantiating the client with `http_client=DefaultAioH ```python import asyncio -from do_gradientai import DefaultAioHttpClient -from do_gradientai import AsyncGradientAI +from gradientai import DefaultAioHttpClient +from gradientai import AsyncGradientAI async def main() -> None: @@ -135,7 +135,7 @@ asyncio.run(main()) We provide support for streaming responses using Server Side Events (SSE). ```python -from do_gradientai import GradientAI +from gradientai import GradientAI client = GradientAI() @@ -156,7 +156,7 @@ for completion in stream: The async client uses the exact same interface. ```python -from do_gradientai import AsyncGradientAI +from gradientai import AsyncGradientAI client = AsyncGradientAI() @@ -188,7 +188,7 @@ Typed requests and responses provide autocomplete and documentation within your Nested parameters are dictionaries, typed using `TypedDict`, for example: ```python -from do_gradientai import GradientAI +from gradientai import GradientAI client = GradientAI() @@ -207,16 +207,16 @@ print(completion.stream_options) ## Handling errors -When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `do_gradientai.APIConnectionError` is raised. +When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `gradientai.APIConnectionError` is raised. 
When the API returns a non-success status code (that is, 4xx or 5xx -response), a subclass of `do_gradientai.APIStatusError` is raised, containing `status_code` and `response` properties. +response), a subclass of `gradientai.APIStatusError` is raised, containing `status_code` and `response` properties. -All errors inherit from `do_gradientai.APIError`. +All errors inherit from `gradientai.APIError`. ```python -import do_gradientai -from do_gradientai import GradientAI +import gradientai +from gradientai import GradientAI client = GradientAI() @@ -230,12 +230,12 @@ try: ], model="llama3.3-70b-instruct", ) -except do_gradientai.APIConnectionError as e: +except gradientai.APIConnectionError as e: print("The server could not be reached") print(e.__cause__) # an underlying Exception, likely raised within httpx. -except do_gradientai.RateLimitError as e: +except gradientai.RateLimitError as e: print("A 429 status code was received; we should back off a bit.") -except do_gradientai.APIStatusError as e: +except gradientai.APIStatusError as e: print("Another non-200-range status code was received") print(e.status_code) print(e.response) @@ -263,7 +263,7 @@ Connection errors (for example, due to a network connectivity problem), 408 Requ You can use the `max_retries` option to configure or disable retry settings: ```python -from do_gradientai import GradientAI +from gradientai import GradientAI # Configure the default for all requests: client = GradientAI( @@ -289,7 +289,7 @@ By default requests time out after 1 minute. 
You can configure this with a `time which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object: ```python -from do_gradientai import GradientAI +from gradientai import GradientAI # Configure the default for all requests: client = GradientAI( @@ -349,7 +349,7 @@ if response.my_field is None: The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g., ```py -from do_gradientai import GradientAI +from gradientai import GradientAI client = GradientAI() response = client.chat.completions.with_raw_response.create( @@ -365,9 +365,9 @@ completion = response.parse() # get the object that `chat.completions.create()` print(completion.choices) ``` -These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/do_gradientai/_response.py) object. +These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) object. -The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/do_gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. +The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. 
#### `.with_streaming_response` @@ -437,7 +437,7 @@ You can directly override the [httpx client](https://www.python-httpx.org/api/#c ```python import httpx -from do_gradientai import GradientAI, DefaultHttpxClient +from gradientai import GradientAI, DefaultHttpxClient client = GradientAI( # Or use the `GRADIENT_AI_BASE_URL` env var @@ -460,7 +460,7 @@ client.with_options(http_client=DefaultHttpxClient(...)) By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting. ```py -from do_gradientai import GradientAI +from gradientai import GradientAI with GradientAI() as client: # make requests here @@ -488,8 +488,8 @@ If you've upgraded to the latest version but aren't seeing any new features you You can determine the version that is being used at runtime with: ```py -import do_gradientai -print(do_gradientai.__version__) +import gradientai +print(gradientai.__version__) ``` ## Requirements diff --git a/api.md b/api.md index 20483e04..231cef87 100644 --- a/api.md +++ b/api.md @@ -1,7 +1,7 @@ # Shared Types ```python -from do_gradientai.types import ( +from gradientai.types import ( Action, ActionLink, APILinks, @@ -40,7 +40,7 @@ from do_gradientai.types import ( Types: ```python -from do_gradientai.types import ( +from gradientai.types import ( APIAgent, APIAgentAPIKeyInfo, APIAgentModel, @@ -60,19 +60,19 @@ from do_gradientai.types import ( Methods: -- client.agents.create(\*\*params) -> AgentCreateResponse -- client.agents.retrieve(uuid) -> AgentRetrieveResponse -- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse -- client.agents.list(\*\*params) -> AgentListResponse -- client.agents.delete(uuid) -> AgentDeleteResponse -- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse +- 
client.agents.create(\*\*params) -> AgentCreateResponse +- client.agents.retrieve(uuid) -> AgentRetrieveResponse +- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse +- client.agents.list(\*\*params) -> AgentListResponse +- client.agents.delete(uuid) -> AgentDeleteResponse +- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse ## APIKeys Types: ```python -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( APIKeyCreateResponse, APIKeyUpdateResponse, APIKeyListResponse, @@ -83,11 +83,11 @@ from do_gradientai.types.agents import ( Methods: -- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse -- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse -- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse -- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse -- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse +- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse +- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse +- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse +- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse +- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse ## Chat @@ -96,19 +96,19 @@ Methods: Types: ```python -from do_gradientai.types.agents.chat import CompletionCreateResponse +from gradientai.types.agents.chat import CompletionCreateResponse ``` Methods: -- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse +- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse ## EvaluationMetrics Types: ```python -from do_gradientai.types.agents import ( +from 
gradientai.types.agents import ( EvaluationMetricListResponse, EvaluationMetricListRegionsResponse, ) @@ -116,15 +116,15 @@ from do_gradientai.types.agents import ( Methods: -- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse -- client.agents.evaluation_metrics.list_regions(\*\*params) -> EvaluationMetricListRegionsResponse +- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse +- client.agents.evaluation_metrics.list_regions(\*\*params) -> EvaluationMetricListRegionsResponse ### Workspaces Types: ```python -from do_gradientai.types.agents.evaluation_metrics import ( +from gradientai.types.agents.evaluation_metrics import ( WorkspaceCreateResponse, WorkspaceRetrieveResponse, WorkspaceUpdateResponse, @@ -136,19 +136,19 @@ from do_gradientai.types.agents.evaluation_metrics import ( Methods: -- client.agents.evaluation_metrics.workspaces.create(\*\*params) -> WorkspaceCreateResponse -- client.agents.evaluation_metrics.workspaces.retrieve(workspace_uuid) -> WorkspaceRetrieveResponse -- client.agents.evaluation_metrics.workspaces.update(path_workspace_uuid, \*\*params) -> WorkspaceUpdateResponse -- client.agents.evaluation_metrics.workspaces.list() -> WorkspaceListResponse -- client.agents.evaluation_metrics.workspaces.delete(workspace_uuid) -> WorkspaceDeleteResponse -- client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(workspace_uuid) -> WorkspaceListEvaluationTestCasesResponse +- client.agents.evaluation_metrics.workspaces.create(\*\*params) -> WorkspaceCreateResponse +- client.agents.evaluation_metrics.workspaces.retrieve(workspace_uuid) -> WorkspaceRetrieveResponse +- client.agents.evaluation_metrics.workspaces.update(path_workspace_uuid, \*\*params) -> WorkspaceUpdateResponse +- client.agents.evaluation_metrics.workspaces.list() -> WorkspaceListResponse +- client.agents.evaluation_metrics.workspaces.delete(workspace_uuid) -> WorkspaceDeleteResponse +- 
client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(workspace_uuid) -> WorkspaceListEvaluationTestCasesResponse #### Agents Types: ```python -from do_gradientai.types.agents.evaluation_metrics.workspaces import ( +from gradientai.types.agents.evaluation_metrics.workspaces import ( AgentListResponse, AgentMoveResponse, ) @@ -156,27 +156,27 @@ from do_gradientai.types.agents.evaluation_metrics.workspaces import ( Methods: -- client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse -- client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse +- client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse +- client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse ### Models Types: ```python -from do_gradientai.types.agents.evaluation_metrics import ModelListResponse +from gradientai.types.agents.evaluation_metrics import ModelListResponse ``` Methods: -- client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse +- client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse ## EvaluationRuns Types: ```python -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( APIEvaluationMetric, APIEvaluationMetricResult, APIEvaluationPrompt, @@ -190,17 +190,17 @@ from do_gradientai.types.agents import ( Methods: -- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse -- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse -- client.agents.evaluation_runs.list_results(evaluation_run_uuid, \*\*params) -> EvaluationRunListResultsResponse -- client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse +- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse 
+- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse +- client.agents.evaluation_runs.list_results(evaluation_run_uuid, \*\*params) -> EvaluationRunListResultsResponse +- client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse ## EvaluationTestCases Types: ```python -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( APIEvaluationTestCase, APIStarMetric, EvaluationTestCaseCreateResponse, @@ -213,18 +213,18 @@ from do_gradientai.types.agents import ( Methods: -- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse -- client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse -- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse -- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse -- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse +- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse +- client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse +- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse +- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse +- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse ## EvaluationDatasets Types: ```python -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( EvaluationDatasetCreateResponse, EvaluationDatasetCreateFileUploadPresignedURLsResponse, ) @@ -232,15 +232,15 @@ from do_gradientai.types.agents import ( Methods: -- 
client.agents.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse -- client.agents.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse +- client.agents.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse +- client.agents.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse ## Functions Types: ```python -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( FunctionCreateResponse, FunctionUpdateResponse, FunctionDeleteResponse, @@ -249,43 +249,43 @@ from do_gradientai.types.agents import ( Methods: -- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse -- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse -- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse +- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse +- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse +- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse ## Versions Types: ```python -from do_gradientai.types.agents import VersionUpdateResponse, VersionListResponse +from gradientai.types.agents import VersionUpdateResponse, VersionListResponse ``` Methods: -- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse -- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse +- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse +- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse ## KnowledgeBases Types: ```python -from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse +from gradientai.types.agents import 
APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse ``` Methods: -- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput -- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput -- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse +- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput +- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput +- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse ## Routes Types: ```python -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( RouteUpdateResponse, RouteDeleteResponse, RouteAddResponse, @@ -295,10 +295,10 @@ from do_gradientai.types.agents import ( Methods: -- client.agents.routes.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteUpdateResponse -- client.agents.routes.delete(child_agent_uuid, \*, parent_agent_uuid) -> RouteDeleteResponse -- client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse -- client.agents.routes.view(uuid) -> RouteViewResponse +- client.agents.routes.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteUpdateResponse +- client.agents.routes.delete(child_agent_uuid, \*, parent_agent_uuid) -> RouteDeleteResponse +- client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse +- client.agents.routes.view(uuid) -> RouteViewResponse # Chat @@ -307,31 +307,31 @@ Methods: Types: ```python -from do_gradientai.types.chat import CompletionCreateResponse +from gradientai.types.chat import CompletionCreateResponse ``` Methods: -- client.chat.completions.create(\*\*params) -> CompletionCreateResponse +- client.chat.completions.create(\*\*params) -> 
CompletionCreateResponse # Regions Types: ```python -from do_gradientai.types import RegionListResponse +from gradientai.types import RegionListResponse ``` Methods: -- client.regions.list(\*\*params) -> RegionListResponse +- client.regions.list(\*\*params) -> RegionListResponse # KnowledgeBases Types: ```python -from do_gradientai.types import ( +from gradientai.types import ( APIKnowledgeBase, KnowledgeBaseCreateResponse, KnowledgeBaseRetrieveResponse, @@ -343,18 +343,18 @@ from do_gradientai.types import ( Methods: -- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse -- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse -- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse -- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse -- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse +- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse +- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse +- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse +- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse +- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse ## DataSources Types: ```python -from do_gradientai.types.knowledge_bases import ( +from gradientai.types.knowledge_bases import ( APIFileUploadDataSource, APIKnowledgeBaseDataSource, APISpacesDataSource, @@ -368,16 +368,16 @@ from do_gradientai.types.knowledge_bases import ( Methods: -- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse -- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse -- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse +- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> 
DataSourceCreateResponse +- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse +- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse ## IndexingJobs Types: ```python -from do_gradientai.types.knowledge_bases import ( +from gradientai.types.knowledge_bases import ( APIIndexedDataSource, APIIndexingJob, IndexingJobCreateResponse, @@ -390,11 +390,11 @@ from do_gradientai.types.knowledge_bases import ( Methods: -- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse -- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse -- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse -- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse -- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse +- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse +- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse +- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse +- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse +- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse # Inference @@ -403,7 +403,7 @@ Methods: Types: ```python -from do_gradientai.types.inference import ( +from gradientai.types.inference import ( APIModelAPIKeyInfo, APIKeyCreateResponse, APIKeyUpdateResponse, @@ -415,18 +415,18 @@ from do_gradientai.types.inference import ( Methods: -- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse -- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse -- 
client.inference.api_keys.list(\*\*params) -> APIKeyListResponse -- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse -- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse +- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse +- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse +- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse +- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse +- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse # Models Types: ```python -from do_gradientai.types import ( +from gradientai.types import ( APIAgreement, APIModel, APIModelVersion, @@ -437,8 +437,8 @@ from do_gradientai.types import ( Methods: -- client.models.retrieve(model) -> ModelRetrieveResponse -- client.models.list() -> ModelListResponse +- client.models.retrieve(model) -> ModelRetrieveResponse +- client.models.list() -> ModelListResponse ## Providers @@ -447,7 +447,7 @@ Methods: Types: ```python -from do_gradientai.types.models.providers import ( +from gradientai.types.models.providers import ( AnthropicCreateResponse, AnthropicRetrieveResponse, AnthropicUpdateResponse, @@ -459,19 +459,19 @@ from do_gradientai.types.models.providers import ( Methods: -- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse -- client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse -- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse -- client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse -- client.models.providers.anthropic.delete(api_key_uuid) -> AnthropicDeleteResponse -- client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse +- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse +- 
client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse +- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse +- client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse +- client.models.providers.anthropic.delete(api_key_uuid) -> AnthropicDeleteResponse +- client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse ### OpenAI Types: ```python -from do_gradientai.types.models.providers import ( +from gradientai.types.models.providers import ( OpenAICreateResponse, OpenAIRetrieveResponse, OpenAIUpdateResponse, @@ -483,19 +483,19 @@ from do_gradientai.types.models.providers import ( Methods: -- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse -- client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse -- client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse -- client.models.providers.openai.list(\*\*params) -> OpenAIListResponse -- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse -- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse +- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse +- client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse +- client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse +- client.models.providers.openai.list(\*\*params) -> OpenAIListResponse +- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse +- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse # Droplets Types: ```python -from do_gradientai.types import ( +from gradientai.types import ( DropletBackupPolicy, DropletCreateResponse, DropletRetrieveResponse, @@ -509,22 +509,22 @@ from do_gradientai.types import ( Methods: -- 
client.droplets.create(\*\*params) -> DropletCreateResponse -- client.droplets.retrieve(droplet_id) -> DropletRetrieveResponse -- client.droplets.list(\*\*params) -> DropletListResponse -- client.droplets.delete(droplet_id) -> None -- client.droplets.delete_by_tag(\*\*params) -> None -- client.droplets.list_firewalls(droplet_id, \*\*params) -> DropletListFirewallsResponse -- client.droplets.list_kernels(droplet_id, \*\*params) -> DropletListKernelsResponse -- client.droplets.list_neighbors(droplet_id) -> DropletListNeighborsResponse -- client.droplets.list_snapshots(droplet_id, \*\*params) -> DropletListSnapshotsResponse +- client.droplets.create(\*\*params) -> DropletCreateResponse +- client.droplets.retrieve(droplet_id) -> DropletRetrieveResponse +- client.droplets.list(\*\*params) -> DropletListResponse +- client.droplets.delete(droplet_id) -> None +- client.droplets.delete_by_tag(\*\*params) -> None +- client.droplets.list_firewalls(droplet_id, \*\*params) -> DropletListFirewallsResponse +- client.droplets.list_kernels(droplet_id, \*\*params) -> DropletListKernelsResponse +- client.droplets.list_neighbors(droplet_id) -> DropletListNeighborsResponse +- client.droplets.list_snapshots(droplet_id, \*\*params) -> DropletListSnapshotsResponse ## Backups Types: ```python -from do_gradientai.types.droplets import ( +from gradientai.types.droplets import ( BackupListResponse, BackupListPoliciesResponse, BackupListSupportedPoliciesResponse, @@ -534,17 +534,17 @@ from do_gradientai.types.droplets import ( Methods: -- client.droplets.backups.list(droplet_id, \*\*params) -> BackupListResponse -- client.droplets.backups.list_policies(\*\*params) -> BackupListPoliciesResponse -- client.droplets.backups.list_supported_policies() -> BackupListSupportedPoliciesResponse -- client.droplets.backups.retrieve_policy(droplet_id) -> BackupRetrievePolicyResponse +- client.droplets.backups.list(droplet_id, \*\*params) -> BackupListResponse +- 
client.droplets.backups.list_policies(\*\*params) -> BackupListPoliciesResponse +- client.droplets.backups.list_supported_policies() -> BackupListSupportedPoliciesResponse +- client.droplets.backups.retrieve_policy(droplet_id) -> BackupRetrievePolicyResponse ## Actions Types: ```python -from do_gradientai.types.droplets import ( +from gradientai.types.droplets import ( ActionRetrieveResponse, ActionListResponse, ActionBulkInitiateResponse, @@ -554,17 +554,17 @@ from do_gradientai.types.droplets import ( Methods: -- client.droplets.actions.retrieve(action_id, \*, droplet_id) -> ActionRetrieveResponse -- client.droplets.actions.list(droplet_id, \*\*params) -> ActionListResponse -- client.droplets.actions.bulk_initiate(\*\*params) -> ActionBulkInitiateResponse -- client.droplets.actions.initiate(droplet_id, \*\*params) -> ActionInitiateResponse +- client.droplets.actions.retrieve(action_id, \*, droplet_id) -> ActionRetrieveResponse +- client.droplets.actions.list(droplet_id, \*\*params) -> ActionListResponse +- client.droplets.actions.bulk_initiate(\*\*params) -> ActionBulkInitiateResponse +- client.droplets.actions.initiate(droplet_id, \*\*params) -> ActionInitiateResponse ## DestroyWithAssociatedResources Types: ```python -from do_gradientai.types.droplets import ( +from gradientai.types.droplets import ( AssociatedResource, DestroyedAssociatedResource, DestroyWithAssociatedResourceListResponse, @@ -574,18 +574,18 @@ from do_gradientai.types.droplets import ( Methods: -- client.droplets.destroy_with_associated_resources.list(droplet_id) -> DestroyWithAssociatedResourceListResponse -- client.droplets.destroy_with_associated_resources.check_status(droplet_id) -> DestroyWithAssociatedResourceCheckStatusResponse -- client.droplets.destroy_with_associated_resources.delete_dangerous(droplet_id) -> None -- client.droplets.destroy_with_associated_resources.delete_selective(droplet_id, \*\*params) -> None -- client.droplets.destroy_with_associated_resources.retry(droplet_id) 
-> None +- client.droplets.destroy_with_associated_resources.list(droplet_id) -> DestroyWithAssociatedResourceListResponse +- client.droplets.destroy_with_associated_resources.check_status(droplet_id) -> DestroyWithAssociatedResourceCheckStatusResponse +- client.droplets.destroy_with_associated_resources.delete_dangerous(droplet_id) -> None +- client.droplets.destroy_with_associated_resources.delete_selective(droplet_id, \*\*params) -> None +- client.droplets.destroy_with_associated_resources.retry(droplet_id) -> None ## Autoscale Types: ```python -from do_gradientai.types.droplets import ( +from gradientai.types.droplets import ( AutoscalePool, AutoscalePoolDropletTemplate, AutoscalePoolDynamicConfig, @@ -602,21 +602,21 @@ from do_gradientai.types.droplets import ( Methods: -- client.droplets.autoscale.create(\*\*params) -> AutoscaleCreateResponse -- client.droplets.autoscale.retrieve(autoscale_pool_id) -> AutoscaleRetrieveResponse -- client.droplets.autoscale.update(autoscale_pool_id, \*\*params) -> AutoscaleUpdateResponse -- client.droplets.autoscale.list(\*\*params) -> AutoscaleListResponse -- client.droplets.autoscale.delete(autoscale_pool_id) -> None -- client.droplets.autoscale.delete_dangerous(autoscale_pool_id) -> None -- client.droplets.autoscale.list_history(autoscale_pool_id, \*\*params) -> AutoscaleListHistoryResponse -- client.droplets.autoscale.list_members(autoscale_pool_id, \*\*params) -> AutoscaleListMembersResponse +- client.droplets.autoscale.create(\*\*params) -> AutoscaleCreateResponse +- client.droplets.autoscale.retrieve(autoscale_pool_id) -> AutoscaleRetrieveResponse +- client.droplets.autoscale.update(autoscale_pool_id, \*\*params) -> AutoscaleUpdateResponse +- client.droplets.autoscale.list(\*\*params) -> AutoscaleListResponse +- client.droplets.autoscale.delete(autoscale_pool_id) -> None +- client.droplets.autoscale.delete_dangerous(autoscale_pool_id) -> None +- client.droplets.autoscale.list_history(autoscale_pool_id, \*\*params) -> 
AutoscaleListHistoryResponse +- client.droplets.autoscale.list_members(autoscale_pool_id, \*\*params) -> AutoscaleListMembersResponse # Firewalls Types: ```python -from do_gradientai.types import ( +from gradientai.types import ( Firewall, FirewallCreateResponse, FirewallRetrieveResponse, @@ -627,39 +627,39 @@ from do_gradientai.types import ( Methods: -- client.firewalls.create(\*\*params) -> FirewallCreateResponse -- client.firewalls.retrieve(firewall_id) -> FirewallRetrieveResponse -- client.firewalls.update(firewall_id, \*\*params) -> FirewallUpdateResponse -- client.firewalls.list(\*\*params) -> FirewallListResponse -- client.firewalls.delete(firewall_id) -> None +- client.firewalls.create(\*\*params) -> FirewallCreateResponse +- client.firewalls.retrieve(firewall_id) -> FirewallRetrieveResponse +- client.firewalls.update(firewall_id, \*\*params) -> FirewallUpdateResponse +- client.firewalls.list(\*\*params) -> FirewallListResponse +- client.firewalls.delete(firewall_id) -> None ## Droplets Methods: -- client.firewalls.droplets.add(firewall_id, \*\*params) -> None -- client.firewalls.droplets.remove(firewall_id, \*\*params) -> None +- client.firewalls.droplets.add(firewall_id, \*\*params) -> None +- client.firewalls.droplets.remove(firewall_id, \*\*params) -> None ## Tags Methods: -- client.firewalls.tags.add(firewall_id, \*\*params) -> None -- client.firewalls.tags.remove(firewall_id, \*\*params) -> None +- client.firewalls.tags.add(firewall_id, \*\*params) -> None +- client.firewalls.tags.remove(firewall_id, \*\*params) -> None ## Rules Methods: -- client.firewalls.rules.add(firewall_id, \*\*params) -> None -- client.firewalls.rules.remove(firewall_id, \*\*params) -> None +- client.firewalls.rules.add(firewall_id, \*\*params) -> None +- client.firewalls.rules.remove(firewall_id, \*\*params) -> None # FloatingIPs Types: ```python -from do_gradientai.types import ( +from gradientai.types import ( FloatingIP, FloatingIPCreateResponse, 
FloatingIPRetrieveResponse, @@ -669,17 +669,17 @@ from do_gradientai.types import ( Methods: -- client.floating_ips.create(\*\*params) -> FloatingIPCreateResponse -- client.floating_ips.retrieve(floating_ip) -> FloatingIPRetrieveResponse -- client.floating_ips.list(\*\*params) -> FloatingIPListResponse -- client.floating_ips.delete(floating_ip) -> None +- client.floating_ips.create(\*\*params) -> FloatingIPCreateResponse +- client.floating_ips.retrieve(floating_ip) -> FloatingIPRetrieveResponse +- client.floating_ips.list(\*\*params) -> FloatingIPListResponse +- client.floating_ips.delete(floating_ip) -> None ## Actions Types: ```python -from do_gradientai.types.floating_ips import ( +from gradientai.types.floating_ips import ( ActionCreateResponse, ActionRetrieveResponse, ActionListResponse, @@ -688,16 +688,16 @@ from do_gradientai.types.floating_ips import ( Methods: -- client.floating_ips.actions.create(floating_ip, \*\*params) -> ActionCreateResponse -- client.floating_ips.actions.retrieve(action_id, \*, floating_ip) -> ActionRetrieveResponse -- client.floating_ips.actions.list(floating_ip) -> ActionListResponse +- client.floating_ips.actions.create(floating_ip, \*\*params) -> ActionCreateResponse +- client.floating_ips.actions.retrieve(action_id, \*, floating_ip) -> ActionRetrieveResponse +- client.floating_ips.actions.list(floating_ip) -> ActionListResponse # Images Types: ```python -from do_gradientai.types import ( +from gradientai.types import ( ImageCreateResponse, ImageRetrieveResponse, ImageUpdateResponse, @@ -707,32 +707,32 @@ from do_gradientai.types import ( Methods: -- client.images.create(\*\*params) -> ImageCreateResponse -- client.images.retrieve(image_id) -> ImageRetrieveResponse -- client.images.update(image_id, \*\*params) -> ImageUpdateResponse -- client.images.list(\*\*params) -> ImageListResponse -- client.images.delete(image_id) -> None +- client.images.create(\*\*params) -> ImageCreateResponse +- client.images.retrieve(image_id) -> 
ImageRetrieveResponse +- client.images.update(image_id, \*\*params) -> ImageUpdateResponse +- client.images.list(\*\*params) -> ImageListResponse +- client.images.delete(image_id) -> None ## Actions Types: ```python -from do_gradientai.types.images import ActionListResponse +from gradientai.types.images import ActionListResponse ``` Methods: -- client.images.actions.create(image_id, \*\*params) -> Action -- client.images.actions.retrieve(action_id, \*, image_id) -> Action -- client.images.actions.list(image_id) -> ActionListResponse +- client.images.actions.create(image_id, \*\*params) -> Action +- client.images.actions.retrieve(action_id, \*, image_id) -> Action +- client.images.actions.list(image_id) -> ActionListResponse # LoadBalancers Types: ```python -from do_gradientai.types import ( +from gradientai.types import ( Domains, ForwardingRule, GlbSettings, @@ -749,75 +749,75 @@ from do_gradientai.types import ( Methods: -- client.load_balancers.create(\*\*params) -> LoadBalancerCreateResponse -- client.load_balancers.retrieve(lb_id) -> LoadBalancerRetrieveResponse -- client.load_balancers.update(lb_id, \*\*params) -> LoadBalancerUpdateResponse -- client.load_balancers.list(\*\*params) -> LoadBalancerListResponse -- client.load_balancers.delete(lb_id) -> None -- client.load_balancers.delete_cache(lb_id) -> None +- client.load_balancers.create(\*\*params) -> LoadBalancerCreateResponse +- client.load_balancers.retrieve(lb_id) -> LoadBalancerRetrieveResponse +- client.load_balancers.update(lb_id, \*\*params) -> LoadBalancerUpdateResponse +- client.load_balancers.list(\*\*params) -> LoadBalancerListResponse +- client.load_balancers.delete(lb_id) -> None +- client.load_balancers.delete_cache(lb_id) -> None ## Droplets Methods: -- client.load_balancers.droplets.add(lb_id, \*\*params) -> None -- client.load_balancers.droplets.remove(lb_id, \*\*params) -> None +- client.load_balancers.droplets.add(lb_id, \*\*params) -> None +- client.load_balancers.droplets.remove(lb_id, 
\*\*params) -> None ## ForwardingRules Methods: -- client.load_balancers.forwarding_rules.add(lb_id, \*\*params) -> None -- client.load_balancers.forwarding_rules.remove(lb_id, \*\*params) -> None +- client.load_balancers.forwarding_rules.add(lb_id, \*\*params) -> None +- client.load_balancers.forwarding_rules.remove(lb_id, \*\*params) -> None # Sizes Types: ```python -from do_gradientai.types import SizeListResponse +from gradientai.types import SizeListResponse ``` Methods: -- client.sizes.list(\*\*params) -> SizeListResponse +- client.sizes.list(\*\*params) -> SizeListResponse # Snapshots Types: ```python -from do_gradientai.types import SnapshotRetrieveResponse, SnapshotListResponse +from gradientai.types import SnapshotRetrieveResponse, SnapshotListResponse ``` Methods: -- client.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse -- client.snapshots.list(\*\*params) -> SnapshotListResponse -- client.snapshots.delete(snapshot_id) -> None +- client.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse +- client.snapshots.list(\*\*params) -> SnapshotListResponse +- client.snapshots.delete(snapshot_id) -> None # Volumes Types: ```python -from do_gradientai.types import VolumeCreateResponse, VolumeRetrieveResponse, VolumeListResponse +from gradientai.types import VolumeCreateResponse, VolumeRetrieveResponse, VolumeListResponse ``` Methods: -- client.volumes.create(\*\*params) -> VolumeCreateResponse -- client.volumes.retrieve(volume_id) -> VolumeRetrieveResponse -- client.volumes.list(\*\*params) -> VolumeListResponse -- client.volumes.delete(volume_id) -> None -- client.volumes.delete_by_name(\*\*params) -> None +- client.volumes.create(\*\*params) -> VolumeCreateResponse +- client.volumes.retrieve(volume_id) -> VolumeRetrieveResponse +- client.volumes.list(\*\*params) -> VolumeListResponse +- client.volumes.delete(volume_id) -> None +- client.volumes.delete_by_name(\*\*params) -> None ## Actions Types: ```python -from do_gradientai.types.volumes 
import ( +from gradientai.types.volumes import ( VolumeAction, ActionRetrieveResponse, ActionListResponse, @@ -828,17 +828,17 @@ from do_gradientai.types.volumes import ( Methods: -- client.volumes.actions.retrieve(action_id, \*, volume_id, \*\*params) -> ActionRetrieveResponse -- client.volumes.actions.list(volume_id, \*\*params) -> ActionListResponse -- client.volumes.actions.initiate_by_id(volume_id, \*\*params) -> ActionInitiateByIDResponse -- client.volumes.actions.initiate_by_name(\*\*params) -> ActionInitiateByNameResponse +- client.volumes.actions.retrieve(action_id, \*, volume_id, \*\*params) -> ActionRetrieveResponse +- client.volumes.actions.list(volume_id, \*\*params) -> ActionListResponse +- client.volumes.actions.initiate_by_id(volume_id, \*\*params) -> ActionInitiateByIDResponse +- client.volumes.actions.initiate_by_name(\*\*params) -> ActionInitiateByNameResponse ## Snapshots Types: ```python -from do_gradientai.types.volumes import ( +from gradientai.types.volumes import ( SnapshotCreateResponse, SnapshotRetrieveResponse, SnapshotListResponse, @@ -847,29 +847,29 @@ from do_gradientai.types.volumes import ( Methods: -- client.volumes.snapshots.create(volume_id, \*\*params) -> SnapshotCreateResponse -- client.volumes.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse -- client.volumes.snapshots.list(volume_id, \*\*params) -> SnapshotListResponse -- client.volumes.snapshots.delete(snapshot_id) -> None +- client.volumes.snapshots.create(volume_id, \*\*params) -> SnapshotCreateResponse +- client.volumes.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse +- client.volumes.snapshots.list(volume_id, \*\*params) -> SnapshotListResponse +- client.volumes.snapshots.delete(snapshot_id) -> None # Account Types: ```python -from do_gradientai.types import AccountRetrieveResponse +from gradientai.types import AccountRetrieveResponse ``` Methods: -- client.account.retrieve() -> AccountRetrieveResponse +- client.account.retrieve() -> 
AccountRetrieveResponse ## Keys Types: ```python -from do_gradientai.types.account import ( +from gradientai.types.account import ( KeyCreateResponse, KeyRetrieveResponse, KeyUpdateResponse, @@ -879,8 +879,8 @@ from do_gradientai.types.account import ( Methods: -- client.account.keys.create(\*\*params) -> KeyCreateResponse -- client.account.keys.retrieve(ssh_key_identifier) -> KeyRetrieveResponse -- client.account.keys.update(ssh_key_identifier, \*\*params) -> KeyUpdateResponse -- client.account.keys.list(\*\*params) -> KeyListResponse -- client.account.keys.delete(ssh_key_identifier) -> None +- client.account.keys.create(\*\*params) -> KeyCreateResponse +- client.account.keys.retrieve(ssh_key_identifier) -> KeyRetrieveResponse +- client.account.keys.update(ssh_key_identifier, \*\*params) -> KeyUpdateResponse +- client.account.keys.list(\*\*params) -> KeyListResponse +- client.account.keys.delete(ssh_key_identifier) -> None diff --git a/mypy.ini b/mypy.ini index 82b0c891..748d8234 100644 --- a/mypy.ini +++ b/mypy.ini @@ -8,7 +8,7 @@ show_error_codes = True # # We also exclude our `tests` as mypy doesn't always infer # types correctly and Pyright will still catch any type errors. -exclude = ^(src/do_gradientai/_files\.py|_dev/.*\.py|tests/.*)$ +exclude = ^(src/gradientai/_files\.py|_dev/.*\.py|tests/.*)$ strict_equality = True implicit_reexport = True diff --git a/pyproject.toml b/pyproject.toml index f5e5770a..2cd02155 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -79,14 +79,14 @@ format = { chain = [ "check:ruff" = "ruff check ." "fix:ruff" = "ruff check --fix ." -"check:importable" = "python -c 'import do_gradientai'" +"check:importable" = "python -c 'import gradientai'" typecheck = { chain = [ "typecheck:pyright", "typecheck:mypy" ]} "typecheck:pyright" = "pyright" -"typecheck:verify-types" = "pyright --verifytypes do_gradientai --ignoreexternal" +"typecheck:verify-types" = "pyright --verifytypes gradientai --ignoreexternal" "typecheck:mypy" = "mypy ." 
[build-system] @@ -99,7 +99,7 @@ include = [ ] [tool.hatch.build.targets.wheel] -packages = ["src/do_gradientai"] +packages = ["src/gradientai"] [tool.hatch.build.targets.sdist] # Basically everything except hidden files/directories (such as .github, .devcontainers, .python-version, etc) @@ -202,7 +202,7 @@ length-sort = true length-sort-straight = true combine-as-imports = true extra-standard-library = ["typing_extensions"] -known-first-party = ["do_gradientai", "tests"] +known-first-party = ["gradientai", "tests"] [tool.ruff.lint.per-file-ignores] "bin/**.py" = ["T201", "T203"] diff --git a/release-please-config.json b/release-please-config.json index a320c1a8..2ff9a58c 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -61,6 +61,6 @@ ], "release-type": "python", "extra-files": [ - "src/do_gradientai/_version.py" + "src/gradientai/_version.py" ] } \ No newline at end of file diff --git a/scripts/lint b/scripts/lint index e46e909b..37b38f6f 100755 --- a/scripts/lint +++ b/scripts/lint @@ -8,4 +8,4 @@ echo "==> Running lints" rye run lint echo "==> Making sure it imports" -rye run python -c 'import do_gradientai' +rye run python -c 'import gradientai' diff --git a/src/do_gradientai/__init__.py b/src/gradientai/__init__.py similarity index 95% rename from src/do_gradientai/__init__.py rename to src/gradientai/__init__.py index 41b943b2..3316fe47 100644 --- a/src/do_gradientai/__init__.py +++ b/src/gradientai/__init__.py @@ -89,12 +89,12 @@ # Update the __module__ attribute for exported symbols so that # error messages point to this module instead of the module # it was originally defined in, e.g. 
-# do_gradientai._exceptions.NotFoundError -> do_gradientai.NotFoundError +# gradientai._exceptions.NotFoundError -> gradientai.NotFoundError __locals = locals() for __name in __all__: if not __name.startswith("__"): try: - __locals[__name].__module__ = "do_gradientai" + __locals[__name].__module__ = "gradientai" except (TypeError, AttributeError): # Some of our exported symbols are builtins which we can't set attributes for. pass diff --git a/src/do_gradientai/_base_client.py b/src/gradientai/_base_client.py similarity index 99% rename from src/do_gradientai/_base_client.py rename to src/gradientai/_base_client.py index 326c662c..379c27d1 100644 --- a/src/do_gradientai/_base_client.py +++ b/src/gradientai/_base_client.py @@ -389,7 +389,7 @@ def __init__( if max_retries is None: # pyright: ignore[reportUnnecessaryComparison] raise TypeError( - "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `do_gradientai.DEFAULT_MAX_RETRIES`" + "max_retries cannot be None. 
If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `gradientai.DEFAULT_MAX_RETRIES`" ) def _enforce_trailing_slash(self, url: URL) -> URL: diff --git a/src/do_gradientai/_client.py b/src/gradientai/_client.py similarity index 100% rename from src/do_gradientai/_client.py rename to src/gradientai/_client.py diff --git a/src/do_gradientai/_compat.py b/src/gradientai/_compat.py similarity index 100% rename from src/do_gradientai/_compat.py rename to src/gradientai/_compat.py diff --git a/src/do_gradientai/_constants.py b/src/gradientai/_constants.py similarity index 100% rename from src/do_gradientai/_constants.py rename to src/gradientai/_constants.py diff --git a/src/do_gradientai/_exceptions.py b/src/gradientai/_exceptions.py similarity index 100% rename from src/do_gradientai/_exceptions.py rename to src/gradientai/_exceptions.py diff --git a/src/do_gradientai/_files.py b/src/gradientai/_files.py similarity index 100% rename from src/do_gradientai/_files.py rename to src/gradientai/_files.py diff --git a/src/do_gradientai/_models.py b/src/gradientai/_models.py similarity index 100% rename from src/do_gradientai/_models.py rename to src/gradientai/_models.py diff --git a/src/do_gradientai/_qs.py b/src/gradientai/_qs.py similarity index 100% rename from src/do_gradientai/_qs.py rename to src/gradientai/_qs.py diff --git a/src/do_gradientai/_resource.py b/src/gradientai/_resource.py similarity index 100% rename from src/do_gradientai/_resource.py rename to src/gradientai/_resource.py diff --git a/src/do_gradientai/_response.py b/src/gradientai/_response.py similarity index 99% rename from src/do_gradientai/_response.py rename to src/gradientai/_response.py index 8ca43971..2037e4ca 100644 --- a/src/do_gradientai/_response.py +++ b/src/gradientai/_response.py @@ -218,7 +218,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: and issubclass(origin, 
pydantic.BaseModel) ): raise TypeError( - "Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`" + "Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`" ) if ( @@ -285,7 +285,7 @@ def parse(self, *, to: type[_T] | None = None) -> R | _T: the `to` argument, e.g. ```py - from do_gradientai import BaseModel + from gradientai import BaseModel class MyModel(BaseModel): @@ -387,7 +387,7 @@ async def parse(self, *, to: type[_T] | None = None) -> R | _T: the `to` argument, e.g. ```py - from do_gradientai import BaseModel + from gradientai import BaseModel class MyModel(BaseModel): @@ -558,7 +558,7 @@ async def stream_to_file( class MissingStreamClassError(TypeError): def __init__(self) -> None: super().__init__( - "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `do_gradientai._streaming` for reference", + "The `stream` argument was set to `True` but the `stream_cls` argument was not given. 
See `gradientai._streaming` for reference", ) diff --git a/src/do_gradientai/_streaming.py b/src/gradientai/_streaming.py similarity index 100% rename from src/do_gradientai/_streaming.py rename to src/gradientai/_streaming.py diff --git a/src/do_gradientai/_types.py b/src/gradientai/_types.py similarity index 99% rename from src/do_gradientai/_types.py rename to src/gradientai/_types.py index c356c700..1bac876d 100644 --- a/src/do_gradientai/_types.py +++ b/src/gradientai/_types.py @@ -81,7 +81,7 @@ # This unfortunately means that you will either have # to import this type and pass it explicitly: # -# from do_gradientai import NoneType +# from gradientai import NoneType # client.get('/foo', cast_to=NoneType) # # or build it yourself: diff --git a/src/do_gradientai/_utils/__init__.py b/src/gradientai/_utils/__init__.py similarity index 100% rename from src/do_gradientai/_utils/__init__.py rename to src/gradientai/_utils/__init__.py diff --git a/src/do_gradientai/_utils/_logs.py b/src/gradientai/_utils/_logs.py similarity index 75% rename from src/do_gradientai/_utils/_logs.py rename to src/gradientai/_utils/_logs.py index ac45b1a5..9047e5c8 100644 --- a/src/do_gradientai/_utils/_logs.py +++ b/src/gradientai/_utils/_logs.py @@ -1,12 +1,12 @@ import os import logging -logger: logging.Logger = logging.getLogger("do_gradientai") +logger: logging.Logger = logging.getLogger("gradientai") httpx_logger: logging.Logger = logging.getLogger("httpx") def _basic_config() -> None: - # e.g. [2023-10-05 14:12:26 - do_gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" + # e.g. 
[2023-10-05 14:12:26 - gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" logging.basicConfig( format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", diff --git a/src/do_gradientai/_utils/_proxy.py b/src/gradientai/_utils/_proxy.py similarity index 100% rename from src/do_gradientai/_utils/_proxy.py rename to src/gradientai/_utils/_proxy.py diff --git a/src/do_gradientai/_utils/_reflection.py b/src/gradientai/_utils/_reflection.py similarity index 100% rename from src/do_gradientai/_utils/_reflection.py rename to src/gradientai/_utils/_reflection.py diff --git a/src/do_gradientai/_utils/_resources_proxy.py b/src/gradientai/_utils/_resources_proxy.py similarity index 50% rename from src/do_gradientai/_utils/_resources_proxy.py rename to src/gradientai/_utils/_resources_proxy.py index 03763c3b..b3bc4931 100644 --- a/src/do_gradientai/_utils/_resources_proxy.py +++ b/src/gradientai/_utils/_resources_proxy.py @@ -7,17 +7,17 @@ class ResourcesProxy(LazyProxy[Any]): - """A proxy for the `do_gradientai.resources` module. + """A proxy for the `gradientai.resources` module. 
- This is used so that we can lazily import `do_gradientai.resources` only when - needed *and* so that users can just import `do_gradientai` and reference `do_gradientai.resources` + This is used so that we can lazily import `gradientai.resources` only when + needed *and* so that users can just import `gradientai` and reference `gradientai.resources` """ @override def __load__(self) -> Any: import importlib - mod = importlib.import_module("do_gradientai.resources") + mod = importlib.import_module("gradientai.resources") return mod diff --git a/src/do_gradientai/_utils/_streams.py b/src/gradientai/_utils/_streams.py similarity index 100% rename from src/do_gradientai/_utils/_streams.py rename to src/gradientai/_utils/_streams.py diff --git a/src/do_gradientai/_utils/_sync.py b/src/gradientai/_utils/_sync.py similarity index 100% rename from src/do_gradientai/_utils/_sync.py rename to src/gradientai/_utils/_sync.py diff --git a/src/do_gradientai/_utils/_transform.py b/src/gradientai/_utils/_transform.py similarity index 100% rename from src/do_gradientai/_utils/_transform.py rename to src/gradientai/_utils/_transform.py diff --git a/src/do_gradientai/_utils/_typing.py b/src/gradientai/_utils/_typing.py similarity index 100% rename from src/do_gradientai/_utils/_typing.py rename to src/gradientai/_utils/_typing.py diff --git a/src/do_gradientai/_utils/_utils.py b/src/gradientai/_utils/_utils.py similarity index 100% rename from src/do_gradientai/_utils/_utils.py rename to src/gradientai/_utils/_utils.py diff --git a/src/do_gradientai/_version.py b/src/gradientai/_version.py similarity index 100% rename from src/do_gradientai/_version.py rename to src/gradientai/_version.py diff --git a/src/do_gradientai/py.typed b/src/gradientai/py.typed similarity index 100% rename from src/do_gradientai/py.typed rename to src/gradientai/py.typed diff --git a/src/do_gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py similarity index 100% rename from 
src/do_gradientai/resources/__init__.py rename to src/gradientai/resources/__init__.py diff --git a/src/do_gradientai/resources/account/__init__.py b/src/gradientai/resources/account/__init__.py similarity index 100% rename from src/do_gradientai/resources/account/__init__.py rename to src/gradientai/resources/account/__init__.py diff --git a/src/do_gradientai/resources/account/account.py b/src/gradientai/resources/account/account.py similarity index 100% rename from src/do_gradientai/resources/account/account.py rename to src/gradientai/resources/account/account.py diff --git a/src/do_gradientai/resources/account/keys.py b/src/gradientai/resources/account/keys.py similarity index 100% rename from src/do_gradientai/resources/account/keys.py rename to src/gradientai/resources/account/keys.py diff --git a/src/do_gradientai/resources/agents/__init__.py b/src/gradientai/resources/agents/__init__.py similarity index 100% rename from src/do_gradientai/resources/agents/__init__.py rename to src/gradientai/resources/agents/__init__.py diff --git a/src/do_gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py similarity index 100% rename from src/do_gradientai/resources/agents/agents.py rename to src/gradientai/resources/agents/agents.py diff --git a/src/do_gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py similarity index 100% rename from src/do_gradientai/resources/agents/api_keys.py rename to src/gradientai/resources/agents/api_keys.py diff --git a/src/do_gradientai/resources/agents/chat/__init__.py b/src/gradientai/resources/agents/chat/__init__.py similarity index 100% rename from src/do_gradientai/resources/agents/chat/__init__.py rename to src/gradientai/resources/agents/chat/__init__.py diff --git a/src/do_gradientai/resources/agents/chat/chat.py b/src/gradientai/resources/agents/chat/chat.py similarity index 100% rename from src/do_gradientai/resources/agents/chat/chat.py rename to 
src/gradientai/resources/agents/chat/chat.py diff --git a/src/do_gradientai/resources/agents/chat/completions.py b/src/gradientai/resources/agents/chat/completions.py similarity index 100% rename from src/do_gradientai/resources/agents/chat/completions.py rename to src/gradientai/resources/agents/chat/completions.py diff --git a/src/do_gradientai/resources/agents/evaluation_datasets.py b/src/gradientai/resources/agents/evaluation_datasets.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_datasets.py rename to src/gradientai/resources/agents/evaluation_datasets.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py b/src/gradientai/resources/agents/evaluation_metrics/__init__.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/__init__.py rename to src/gradientai/resources/agents/evaluation_metrics/__init__.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py rename to src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/models.py b/src/gradientai/resources/agents/evaluation_metrics/models.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/models.py rename to src/gradientai/resources/agents/evaluation_metrics/models.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py b/src/gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py rename to src/gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py diff --git 
a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py b/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py rename to src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py diff --git a/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py b/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py rename to src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py diff --git a/src/do_gradientai/resources/agents/evaluation_runs.py b/src/gradientai/resources/agents/evaluation_runs.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_runs.py rename to src/gradientai/resources/agents/evaluation_runs.py diff --git a/src/do_gradientai/resources/agents/evaluation_test_cases.py b/src/gradientai/resources/agents/evaluation_test_cases.py similarity index 100% rename from src/do_gradientai/resources/agents/evaluation_test_cases.py rename to src/gradientai/resources/agents/evaluation_test_cases.py diff --git a/src/do_gradientai/resources/agents/functions.py b/src/gradientai/resources/agents/functions.py similarity index 100% rename from src/do_gradientai/resources/agents/functions.py rename to src/gradientai/resources/agents/functions.py diff --git a/src/do_gradientai/resources/agents/knowledge_bases.py b/src/gradientai/resources/agents/knowledge_bases.py similarity index 100% rename from src/do_gradientai/resources/agents/knowledge_bases.py rename to src/gradientai/resources/agents/knowledge_bases.py diff --git a/src/do_gradientai/resources/agents/routes.py b/src/gradientai/resources/agents/routes.py similarity index 100% rename from src/do_gradientai/resources/agents/routes.py rename to 
src/gradientai/resources/agents/routes.py diff --git a/src/do_gradientai/resources/agents/versions.py b/src/gradientai/resources/agents/versions.py similarity index 100% rename from src/do_gradientai/resources/agents/versions.py rename to src/gradientai/resources/agents/versions.py diff --git a/src/do_gradientai/resources/chat/__init__.py b/src/gradientai/resources/chat/__init__.py similarity index 100% rename from src/do_gradientai/resources/chat/__init__.py rename to src/gradientai/resources/chat/__init__.py diff --git a/src/do_gradientai/resources/chat/chat.py b/src/gradientai/resources/chat/chat.py similarity index 100% rename from src/do_gradientai/resources/chat/chat.py rename to src/gradientai/resources/chat/chat.py diff --git a/src/do_gradientai/resources/chat/completions.py b/src/gradientai/resources/chat/completions.py similarity index 100% rename from src/do_gradientai/resources/chat/completions.py rename to src/gradientai/resources/chat/completions.py diff --git a/src/do_gradientai/resources/droplets/__init__.py b/src/gradientai/resources/droplets/__init__.py similarity index 100% rename from src/do_gradientai/resources/droplets/__init__.py rename to src/gradientai/resources/droplets/__init__.py diff --git a/src/do_gradientai/resources/droplets/actions.py b/src/gradientai/resources/droplets/actions.py similarity index 100% rename from src/do_gradientai/resources/droplets/actions.py rename to src/gradientai/resources/droplets/actions.py diff --git a/src/do_gradientai/resources/droplets/autoscale.py b/src/gradientai/resources/droplets/autoscale.py similarity index 100% rename from src/do_gradientai/resources/droplets/autoscale.py rename to src/gradientai/resources/droplets/autoscale.py diff --git a/src/do_gradientai/resources/droplets/backups.py b/src/gradientai/resources/droplets/backups.py similarity index 100% rename from src/do_gradientai/resources/droplets/backups.py rename to src/gradientai/resources/droplets/backups.py diff --git 
a/src/do_gradientai/resources/droplets/destroy_with_associated_resources.py b/src/gradientai/resources/droplets/destroy_with_associated_resources.py similarity index 100% rename from src/do_gradientai/resources/droplets/destroy_with_associated_resources.py rename to src/gradientai/resources/droplets/destroy_with_associated_resources.py diff --git a/src/do_gradientai/resources/droplets/droplets.py b/src/gradientai/resources/droplets/droplets.py similarity index 100% rename from src/do_gradientai/resources/droplets/droplets.py rename to src/gradientai/resources/droplets/droplets.py diff --git a/src/do_gradientai/resources/firewalls/__init__.py b/src/gradientai/resources/firewalls/__init__.py similarity index 100% rename from src/do_gradientai/resources/firewalls/__init__.py rename to src/gradientai/resources/firewalls/__init__.py diff --git a/src/do_gradientai/resources/firewalls/droplets.py b/src/gradientai/resources/firewalls/droplets.py similarity index 100% rename from src/do_gradientai/resources/firewalls/droplets.py rename to src/gradientai/resources/firewalls/droplets.py diff --git a/src/do_gradientai/resources/firewalls/firewalls.py b/src/gradientai/resources/firewalls/firewalls.py similarity index 100% rename from src/do_gradientai/resources/firewalls/firewalls.py rename to src/gradientai/resources/firewalls/firewalls.py diff --git a/src/do_gradientai/resources/firewalls/rules.py b/src/gradientai/resources/firewalls/rules.py similarity index 100% rename from src/do_gradientai/resources/firewalls/rules.py rename to src/gradientai/resources/firewalls/rules.py diff --git a/src/do_gradientai/resources/firewalls/tags.py b/src/gradientai/resources/firewalls/tags.py similarity index 100% rename from src/do_gradientai/resources/firewalls/tags.py rename to src/gradientai/resources/firewalls/tags.py diff --git a/src/do_gradientai/resources/floating_ips/__init__.py b/src/gradientai/resources/floating_ips/__init__.py similarity index 100% rename from 
src/do_gradientai/resources/floating_ips/__init__.py rename to src/gradientai/resources/floating_ips/__init__.py diff --git a/src/do_gradientai/resources/floating_ips/actions.py b/src/gradientai/resources/floating_ips/actions.py similarity index 100% rename from src/do_gradientai/resources/floating_ips/actions.py rename to src/gradientai/resources/floating_ips/actions.py diff --git a/src/do_gradientai/resources/floating_ips/floating_ips.py b/src/gradientai/resources/floating_ips/floating_ips.py similarity index 100% rename from src/do_gradientai/resources/floating_ips/floating_ips.py rename to src/gradientai/resources/floating_ips/floating_ips.py diff --git a/src/do_gradientai/resources/images/__init__.py b/src/gradientai/resources/images/__init__.py similarity index 100% rename from src/do_gradientai/resources/images/__init__.py rename to src/gradientai/resources/images/__init__.py diff --git a/src/do_gradientai/resources/images/actions.py b/src/gradientai/resources/images/actions.py similarity index 100% rename from src/do_gradientai/resources/images/actions.py rename to src/gradientai/resources/images/actions.py diff --git a/src/do_gradientai/resources/images/images.py b/src/gradientai/resources/images/images.py similarity index 100% rename from src/do_gradientai/resources/images/images.py rename to src/gradientai/resources/images/images.py diff --git a/src/do_gradientai/resources/inference/__init__.py b/src/gradientai/resources/inference/__init__.py similarity index 100% rename from src/do_gradientai/resources/inference/__init__.py rename to src/gradientai/resources/inference/__init__.py diff --git a/src/do_gradientai/resources/inference/api_keys.py b/src/gradientai/resources/inference/api_keys.py similarity index 100% rename from src/do_gradientai/resources/inference/api_keys.py rename to src/gradientai/resources/inference/api_keys.py diff --git a/src/do_gradientai/resources/inference/inference.py b/src/gradientai/resources/inference/inference.py similarity 
index 100% rename from src/do_gradientai/resources/inference/inference.py rename to src/gradientai/resources/inference/inference.py diff --git a/src/do_gradientai/resources/knowledge_bases/__init__.py b/src/gradientai/resources/knowledge_bases/__init__.py similarity index 100% rename from src/do_gradientai/resources/knowledge_bases/__init__.py rename to src/gradientai/resources/knowledge_bases/__init__.py diff --git a/src/do_gradientai/resources/knowledge_bases/data_sources.py b/src/gradientai/resources/knowledge_bases/data_sources.py similarity index 100% rename from src/do_gradientai/resources/knowledge_bases/data_sources.py rename to src/gradientai/resources/knowledge_bases/data_sources.py diff --git a/src/do_gradientai/resources/knowledge_bases/indexing_jobs.py b/src/gradientai/resources/knowledge_bases/indexing_jobs.py similarity index 100% rename from src/do_gradientai/resources/knowledge_bases/indexing_jobs.py rename to src/gradientai/resources/knowledge_bases/indexing_jobs.py diff --git a/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py similarity index 100% rename from src/do_gradientai/resources/knowledge_bases/knowledge_bases.py rename to src/gradientai/resources/knowledge_bases/knowledge_bases.py diff --git a/src/do_gradientai/resources/load_balancers/__init__.py b/src/gradientai/resources/load_balancers/__init__.py similarity index 100% rename from src/do_gradientai/resources/load_balancers/__init__.py rename to src/gradientai/resources/load_balancers/__init__.py diff --git a/src/do_gradientai/resources/load_balancers/droplets.py b/src/gradientai/resources/load_balancers/droplets.py similarity index 100% rename from src/do_gradientai/resources/load_balancers/droplets.py rename to src/gradientai/resources/load_balancers/droplets.py diff --git a/src/do_gradientai/resources/load_balancers/forwarding_rules.py b/src/gradientai/resources/load_balancers/forwarding_rules.py similarity 
index 100% rename from src/do_gradientai/resources/load_balancers/forwarding_rules.py rename to src/gradientai/resources/load_balancers/forwarding_rules.py diff --git a/src/do_gradientai/resources/load_balancers/load_balancers.py b/src/gradientai/resources/load_balancers/load_balancers.py similarity index 100% rename from src/do_gradientai/resources/load_balancers/load_balancers.py rename to src/gradientai/resources/load_balancers/load_balancers.py diff --git a/src/do_gradientai/resources/models/__init__.py b/src/gradientai/resources/models/__init__.py similarity index 100% rename from src/do_gradientai/resources/models/__init__.py rename to src/gradientai/resources/models/__init__.py diff --git a/src/do_gradientai/resources/models/models.py b/src/gradientai/resources/models/models.py similarity index 100% rename from src/do_gradientai/resources/models/models.py rename to src/gradientai/resources/models/models.py diff --git a/src/do_gradientai/resources/models/providers/__init__.py b/src/gradientai/resources/models/providers/__init__.py similarity index 100% rename from src/do_gradientai/resources/models/providers/__init__.py rename to src/gradientai/resources/models/providers/__init__.py diff --git a/src/do_gradientai/resources/models/providers/anthropic.py b/src/gradientai/resources/models/providers/anthropic.py similarity index 100% rename from src/do_gradientai/resources/models/providers/anthropic.py rename to src/gradientai/resources/models/providers/anthropic.py diff --git a/src/do_gradientai/resources/models/providers/openai.py b/src/gradientai/resources/models/providers/openai.py similarity index 100% rename from src/do_gradientai/resources/models/providers/openai.py rename to src/gradientai/resources/models/providers/openai.py diff --git a/src/do_gradientai/resources/models/providers/providers.py b/src/gradientai/resources/models/providers/providers.py similarity index 100% rename from src/do_gradientai/resources/models/providers/providers.py rename to 
src/gradientai/resources/models/providers/providers.py diff --git a/src/do_gradientai/resources/regions.py b/src/gradientai/resources/regions.py similarity index 100% rename from src/do_gradientai/resources/regions.py rename to src/gradientai/resources/regions.py diff --git a/src/do_gradientai/resources/sizes.py b/src/gradientai/resources/sizes.py similarity index 100% rename from src/do_gradientai/resources/sizes.py rename to src/gradientai/resources/sizes.py diff --git a/src/do_gradientai/resources/snapshots.py b/src/gradientai/resources/snapshots.py similarity index 100% rename from src/do_gradientai/resources/snapshots.py rename to src/gradientai/resources/snapshots.py diff --git a/src/do_gradientai/resources/volumes/__init__.py b/src/gradientai/resources/volumes/__init__.py similarity index 100% rename from src/do_gradientai/resources/volumes/__init__.py rename to src/gradientai/resources/volumes/__init__.py diff --git a/src/do_gradientai/resources/volumes/actions.py b/src/gradientai/resources/volumes/actions.py similarity index 100% rename from src/do_gradientai/resources/volumes/actions.py rename to src/gradientai/resources/volumes/actions.py diff --git a/src/do_gradientai/resources/volumes/snapshots.py b/src/gradientai/resources/volumes/snapshots.py similarity index 100% rename from src/do_gradientai/resources/volumes/snapshots.py rename to src/gradientai/resources/volumes/snapshots.py diff --git a/src/do_gradientai/resources/volumes/volumes.py b/src/gradientai/resources/volumes/volumes.py similarity index 100% rename from src/do_gradientai/resources/volumes/volumes.py rename to src/gradientai/resources/volumes/volumes.py diff --git a/src/do_gradientai/types/__init__.py b/src/gradientai/types/__init__.py similarity index 100% rename from src/do_gradientai/types/__init__.py rename to src/gradientai/types/__init__.py diff --git a/src/do_gradientai/types/account/__init__.py b/src/gradientai/types/account/__init__.py similarity index 100% rename from 
src/do_gradientai/types/account/__init__.py rename to src/gradientai/types/account/__init__.py diff --git a/src/do_gradientai/types/account/key_create_params.py b/src/gradientai/types/account/key_create_params.py similarity index 100% rename from src/do_gradientai/types/account/key_create_params.py rename to src/gradientai/types/account/key_create_params.py diff --git a/src/do_gradientai/types/account/key_create_response.py b/src/gradientai/types/account/key_create_response.py similarity index 100% rename from src/do_gradientai/types/account/key_create_response.py rename to src/gradientai/types/account/key_create_response.py diff --git a/src/do_gradientai/types/account/key_list_params.py b/src/gradientai/types/account/key_list_params.py similarity index 100% rename from src/do_gradientai/types/account/key_list_params.py rename to src/gradientai/types/account/key_list_params.py diff --git a/src/do_gradientai/types/account/key_list_response.py b/src/gradientai/types/account/key_list_response.py similarity index 100% rename from src/do_gradientai/types/account/key_list_response.py rename to src/gradientai/types/account/key_list_response.py diff --git a/src/do_gradientai/types/account/key_retrieve_response.py b/src/gradientai/types/account/key_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/account/key_retrieve_response.py rename to src/gradientai/types/account/key_retrieve_response.py diff --git a/src/do_gradientai/types/account/key_update_params.py b/src/gradientai/types/account/key_update_params.py similarity index 100% rename from src/do_gradientai/types/account/key_update_params.py rename to src/gradientai/types/account/key_update_params.py diff --git a/src/do_gradientai/types/account/key_update_response.py b/src/gradientai/types/account/key_update_response.py similarity index 100% rename from src/do_gradientai/types/account/key_update_response.py rename to src/gradientai/types/account/key_update_response.py diff --git 
a/src/do_gradientai/types/account_retrieve_response.py b/src/gradientai/types/account_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/account_retrieve_response.py rename to src/gradientai/types/account_retrieve_response.py diff --git a/src/do_gradientai/types/agent_create_params.py b/src/gradientai/types/agent_create_params.py similarity index 100% rename from src/do_gradientai/types/agent_create_params.py rename to src/gradientai/types/agent_create_params.py diff --git a/src/do_gradientai/types/agent_create_response.py b/src/gradientai/types/agent_create_response.py similarity index 100% rename from src/do_gradientai/types/agent_create_response.py rename to src/gradientai/types/agent_create_response.py diff --git a/src/do_gradientai/types/agent_delete_response.py b/src/gradientai/types/agent_delete_response.py similarity index 100% rename from src/do_gradientai/types/agent_delete_response.py rename to src/gradientai/types/agent_delete_response.py diff --git a/src/do_gradientai/types/agent_list_params.py b/src/gradientai/types/agent_list_params.py similarity index 100% rename from src/do_gradientai/types/agent_list_params.py rename to src/gradientai/types/agent_list_params.py diff --git a/src/do_gradientai/types/agent_list_response.py b/src/gradientai/types/agent_list_response.py similarity index 100% rename from src/do_gradientai/types/agent_list_response.py rename to src/gradientai/types/agent_list_response.py diff --git a/src/do_gradientai/types/agent_retrieve_response.py b/src/gradientai/types/agent_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/agent_retrieve_response.py rename to src/gradientai/types/agent_retrieve_response.py diff --git a/src/do_gradientai/types/agent_update_params.py b/src/gradientai/types/agent_update_params.py similarity index 100% rename from src/do_gradientai/types/agent_update_params.py rename to src/gradientai/types/agent_update_params.py diff --git 
a/src/do_gradientai/types/agent_update_response.py b/src/gradientai/types/agent_update_response.py similarity index 100% rename from src/do_gradientai/types/agent_update_response.py rename to src/gradientai/types/agent_update_response.py diff --git a/src/do_gradientai/types/agent_update_status_params.py b/src/gradientai/types/agent_update_status_params.py similarity index 100% rename from src/do_gradientai/types/agent_update_status_params.py rename to src/gradientai/types/agent_update_status_params.py diff --git a/src/do_gradientai/types/agent_update_status_response.py b/src/gradientai/types/agent_update_status_response.py similarity index 100% rename from src/do_gradientai/types/agent_update_status_response.py rename to src/gradientai/types/agent_update_status_response.py diff --git a/src/do_gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py similarity index 100% rename from src/do_gradientai/types/agents/__init__.py rename to src/gradientai/types/agents/__init__.py diff --git a/src/do_gradientai/types/agents/api_evaluation_metric.py b/src/gradientai/types/agents/api_evaluation_metric.py similarity index 100% rename from src/do_gradientai/types/agents/api_evaluation_metric.py rename to src/gradientai/types/agents/api_evaluation_metric.py diff --git a/src/do_gradientai/types/agents/api_evaluation_metric_result.py b/src/gradientai/types/agents/api_evaluation_metric_result.py similarity index 100% rename from src/do_gradientai/types/agents/api_evaluation_metric_result.py rename to src/gradientai/types/agents/api_evaluation_metric_result.py diff --git a/src/do_gradientai/types/agents/api_evaluation_prompt.py b/src/gradientai/types/agents/api_evaluation_prompt.py similarity index 100% rename from src/do_gradientai/types/agents/api_evaluation_prompt.py rename to src/gradientai/types/agents/api_evaluation_prompt.py diff --git a/src/do_gradientai/types/agents/api_evaluation_run.py b/src/gradientai/types/agents/api_evaluation_run.py similarity 
index 100% rename from src/do_gradientai/types/agents/api_evaluation_run.py rename to src/gradientai/types/agents/api_evaluation_run.py diff --git a/src/do_gradientai/types/agents/api_evaluation_test_case.py b/src/gradientai/types/agents/api_evaluation_test_case.py similarity index 100% rename from src/do_gradientai/types/agents/api_evaluation_test_case.py rename to src/gradientai/types/agents/api_evaluation_test_case.py diff --git a/src/do_gradientai/types/agents/api_key_create_params.py b/src/gradientai/types/agents/api_key_create_params.py similarity index 100% rename from src/do_gradientai/types/agents/api_key_create_params.py rename to src/gradientai/types/agents/api_key_create_params.py diff --git a/src/do_gradientai/types/agents/api_key_create_response.py b/src/gradientai/types/agents/api_key_create_response.py similarity index 100% rename from src/do_gradientai/types/agents/api_key_create_response.py rename to src/gradientai/types/agents/api_key_create_response.py diff --git a/src/do_gradientai/types/agents/api_key_delete_response.py b/src/gradientai/types/agents/api_key_delete_response.py similarity index 100% rename from src/do_gradientai/types/agents/api_key_delete_response.py rename to src/gradientai/types/agents/api_key_delete_response.py diff --git a/src/do_gradientai/types/agents/api_key_list_params.py b/src/gradientai/types/agents/api_key_list_params.py similarity index 100% rename from src/do_gradientai/types/agents/api_key_list_params.py rename to src/gradientai/types/agents/api_key_list_params.py diff --git a/src/do_gradientai/types/agents/api_key_list_response.py b/src/gradientai/types/agents/api_key_list_response.py similarity index 100% rename from src/do_gradientai/types/agents/api_key_list_response.py rename to src/gradientai/types/agents/api_key_list_response.py diff --git a/src/do_gradientai/types/agents/api_key_regenerate_response.py b/src/gradientai/types/agents/api_key_regenerate_response.py similarity index 100% rename from 
src/do_gradientai/types/agents/api_key_regenerate_response.py rename to src/gradientai/types/agents/api_key_regenerate_response.py diff --git a/src/do_gradientai/types/agents/api_key_update_params.py b/src/gradientai/types/agents/api_key_update_params.py similarity index 100% rename from src/do_gradientai/types/agents/api_key_update_params.py rename to src/gradientai/types/agents/api_key_update_params.py diff --git a/src/do_gradientai/types/agents/api_key_update_response.py b/src/gradientai/types/agents/api_key_update_response.py similarity index 100% rename from src/do_gradientai/types/agents/api_key_update_response.py rename to src/gradientai/types/agents/api_key_update_response.py diff --git a/src/do_gradientai/types/agents/api_link_knowledge_base_output.py b/src/gradientai/types/agents/api_link_knowledge_base_output.py similarity index 100% rename from src/do_gradientai/types/agents/api_link_knowledge_base_output.py rename to src/gradientai/types/agents/api_link_knowledge_base_output.py diff --git a/src/do_gradientai/types/agents/api_star_metric.py b/src/gradientai/types/agents/api_star_metric.py similarity index 100% rename from src/do_gradientai/types/agents/api_star_metric.py rename to src/gradientai/types/agents/api_star_metric.py diff --git a/src/do_gradientai/types/agents/api_star_metric_param.py b/src/gradientai/types/agents/api_star_metric_param.py similarity index 100% rename from src/do_gradientai/types/agents/api_star_metric_param.py rename to src/gradientai/types/agents/api_star_metric_param.py diff --git a/src/do_gradientai/types/agents/chat/__init__.py b/src/gradientai/types/agents/chat/__init__.py similarity index 100% rename from src/do_gradientai/types/agents/chat/__init__.py rename to src/gradientai/types/agents/chat/__init__.py diff --git a/src/do_gradientai/types/agents/chat/completion_create_params.py b/src/gradientai/types/agents/chat/completion_create_params.py similarity index 100% rename from 
src/do_gradientai/types/agents/chat/completion_create_params.py rename to src/gradientai/types/agents/chat/completion_create_params.py diff --git a/src/do_gradientai/types/agents/chat/completion_create_response.py b/src/gradientai/types/agents/chat/completion_create_response.py similarity index 100% rename from src/do_gradientai/types/agents/chat/completion_create_response.py rename to src/gradientai/types/agents/chat/completion_create_response.py diff --git a/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py rename to src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py diff --git a/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py b/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py rename to src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py diff --git a/src/do_gradientai/types/agents/evaluation_dataset_create_params.py b/src/gradientai/types/agents/evaluation_dataset_create_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_dataset_create_params.py rename to src/gradientai/types/agents/evaluation_dataset_create_params.py diff --git a/src/do_gradientai/types/agents/evaluation_dataset_create_response.py b/src/gradientai/types/agents/evaluation_dataset_create_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_dataset_create_response.py rename to src/gradientai/types/agents/evaluation_dataset_create_response.py diff --git 
a/src/do_gradientai/types/agents/evaluation_metric_list_regions_params.py b/src/gradientai/types/agents/evaluation_metric_list_regions_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metric_list_regions_params.py rename to src/gradientai/types/agents/evaluation_metric_list_regions_params.py diff --git a/src/do_gradientai/types/agents/evaluation_metric_list_regions_response.py b/src/gradientai/types/agents/evaluation_metric_list_regions_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metric_list_regions_response.py rename to src/gradientai/types/agents/evaluation_metric_list_regions_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metric_list_response.py b/src/gradientai/types/agents/evaluation_metric_list_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metric_list_response.py rename to src/gradientai/types/agents/evaluation_metric_list_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/__init__.py b/src/gradientai/types/agents/evaluation_metrics/__init__.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/__init__.py rename to src/gradientai/types/agents/evaluation_metrics/__init__.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/model_list_params.py b/src/gradientai/types/agents/evaluation_metrics/model_list_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/model_list_params.py rename to src/gradientai/types/agents/evaluation_metrics/model_list_params.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py b/src/gradientai/types/agents/evaluation_metrics/model_list_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py rename to src/gradientai/types/agents/evaluation_metrics/model_list_response.py diff --git 
a/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py b/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py rename to src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_create_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py rename to src/gradientai/types/agents/evaluation_metrics/workspace_create_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py rename to src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py rename to src/gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py rename to src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py 
b/src/gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py rename to src/gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py b/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py rename to src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_update_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py rename to src/gradientai/types/agents/evaluation_metrics/workspace_update_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/__init__.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py rename to src/gradientai/types/agents/evaluation_metrics/workspaces/__init__.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py rename to src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py similarity index 100% rename from 
src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py rename to src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py rename to src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py diff --git a/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py rename to src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py diff --git a/src/do_gradientai/types/agents/evaluation_run_create_params.py b/src/gradientai/types/agents/evaluation_run_create_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_run_create_params.py rename to src/gradientai/types/agents/evaluation_run_create_params.py diff --git a/src/do_gradientai/types/agents/evaluation_run_create_response.py b/src/gradientai/types/agents/evaluation_run_create_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_run_create_response.py rename to src/gradientai/types/agents/evaluation_run_create_response.py diff --git a/src/do_gradientai/types/agents/evaluation_run_list_results_params.py b/src/gradientai/types/agents/evaluation_run_list_results_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_run_list_results_params.py rename to src/gradientai/types/agents/evaluation_run_list_results_params.py diff --git a/src/do_gradientai/types/agents/evaluation_run_list_results_response.py 
b/src/gradientai/types/agents/evaluation_run_list_results_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_run_list_results_response.py rename to src/gradientai/types/agents/evaluation_run_list_results_response.py diff --git a/src/do_gradientai/types/agents/evaluation_run_retrieve_response.py b/src/gradientai/types/agents/evaluation_run_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_run_retrieve_response.py rename to src/gradientai/types/agents/evaluation_run_retrieve_response.py diff --git a/src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py b/src/gradientai/types/agents/evaluation_run_retrieve_results_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py rename to src/gradientai/types/agents/evaluation_run_retrieve_results_response.py diff --git a/src/do_gradientai/types/agents/evaluation_test_case_create_params.py b/src/gradientai/types/agents/evaluation_test_case_create_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_create_params.py rename to src/gradientai/types/agents/evaluation_test_case_create_params.py diff --git a/src/do_gradientai/types/agents/evaluation_test_case_create_response.py b/src/gradientai/types/agents/evaluation_test_case_create_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_create_response.py rename to src/gradientai/types/agents/evaluation_test_case_create_response.py diff --git a/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py b/src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py rename to src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py diff --git 
a/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py b/src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py rename to src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py diff --git a/src/do_gradientai/types/agents/evaluation_test_case_list_response.py b/src/gradientai/types/agents/evaluation_test_case_list_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_list_response.py rename to src/gradientai/types/agents/evaluation_test_case_list_response.py diff --git a/src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py b/src/gradientai/types/agents/evaluation_test_case_retrieve_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py rename to src/gradientai/types/agents/evaluation_test_case_retrieve_params.py diff --git a/src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py b/src/gradientai/types/agents/evaluation_test_case_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py rename to src/gradientai/types/agents/evaluation_test_case_retrieve_response.py diff --git a/src/do_gradientai/types/agents/evaluation_test_case_update_params.py b/src/gradientai/types/agents/evaluation_test_case_update_params.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_update_params.py rename to src/gradientai/types/agents/evaluation_test_case_update_params.py diff --git a/src/do_gradientai/types/agents/evaluation_test_case_update_response.py b/src/gradientai/types/agents/evaluation_test_case_update_response.py similarity index 100% rename from src/do_gradientai/types/agents/evaluation_test_case_update_response.py rename to 
src/gradientai/types/agents/evaluation_test_case_update_response.py diff --git a/src/do_gradientai/types/agents/function_create_params.py b/src/gradientai/types/agents/function_create_params.py similarity index 100% rename from src/do_gradientai/types/agents/function_create_params.py rename to src/gradientai/types/agents/function_create_params.py diff --git a/src/do_gradientai/types/agents/function_create_response.py b/src/gradientai/types/agents/function_create_response.py similarity index 100% rename from src/do_gradientai/types/agents/function_create_response.py rename to src/gradientai/types/agents/function_create_response.py diff --git a/src/do_gradientai/types/agents/function_delete_response.py b/src/gradientai/types/agents/function_delete_response.py similarity index 100% rename from src/do_gradientai/types/agents/function_delete_response.py rename to src/gradientai/types/agents/function_delete_response.py diff --git a/src/do_gradientai/types/agents/function_update_params.py b/src/gradientai/types/agents/function_update_params.py similarity index 100% rename from src/do_gradientai/types/agents/function_update_params.py rename to src/gradientai/types/agents/function_update_params.py diff --git a/src/do_gradientai/types/agents/function_update_response.py b/src/gradientai/types/agents/function_update_response.py similarity index 100% rename from src/do_gradientai/types/agents/function_update_response.py rename to src/gradientai/types/agents/function_update_response.py diff --git a/src/do_gradientai/types/agents/knowledge_base_detach_response.py b/src/gradientai/types/agents/knowledge_base_detach_response.py similarity index 100% rename from src/do_gradientai/types/agents/knowledge_base_detach_response.py rename to src/gradientai/types/agents/knowledge_base_detach_response.py diff --git a/src/do_gradientai/types/agents/route_add_params.py b/src/gradientai/types/agents/route_add_params.py similarity index 100% rename from 
src/do_gradientai/types/agents/route_add_params.py rename to src/gradientai/types/agents/route_add_params.py diff --git a/src/do_gradientai/types/agents/route_add_response.py b/src/gradientai/types/agents/route_add_response.py similarity index 100% rename from src/do_gradientai/types/agents/route_add_response.py rename to src/gradientai/types/agents/route_add_response.py diff --git a/src/do_gradientai/types/agents/route_delete_response.py b/src/gradientai/types/agents/route_delete_response.py similarity index 100% rename from src/do_gradientai/types/agents/route_delete_response.py rename to src/gradientai/types/agents/route_delete_response.py diff --git a/src/do_gradientai/types/agents/route_update_params.py b/src/gradientai/types/agents/route_update_params.py similarity index 100% rename from src/do_gradientai/types/agents/route_update_params.py rename to src/gradientai/types/agents/route_update_params.py diff --git a/src/do_gradientai/types/agents/route_update_response.py b/src/gradientai/types/agents/route_update_response.py similarity index 100% rename from src/do_gradientai/types/agents/route_update_response.py rename to src/gradientai/types/agents/route_update_response.py diff --git a/src/do_gradientai/types/agents/route_view_response.py b/src/gradientai/types/agents/route_view_response.py similarity index 100% rename from src/do_gradientai/types/agents/route_view_response.py rename to src/gradientai/types/agents/route_view_response.py diff --git a/src/do_gradientai/types/agents/version_list_params.py b/src/gradientai/types/agents/version_list_params.py similarity index 100% rename from src/do_gradientai/types/agents/version_list_params.py rename to src/gradientai/types/agents/version_list_params.py diff --git a/src/do_gradientai/types/agents/version_list_response.py b/src/gradientai/types/agents/version_list_response.py similarity index 100% rename from src/do_gradientai/types/agents/version_list_response.py rename to 
src/gradientai/types/agents/version_list_response.py diff --git a/src/do_gradientai/types/agents/version_update_params.py b/src/gradientai/types/agents/version_update_params.py similarity index 100% rename from src/do_gradientai/types/agents/version_update_params.py rename to src/gradientai/types/agents/version_update_params.py diff --git a/src/do_gradientai/types/agents/version_update_response.py b/src/gradientai/types/agents/version_update_response.py similarity index 100% rename from src/do_gradientai/types/agents/version_update_response.py rename to src/gradientai/types/agents/version_update_response.py diff --git a/src/do_gradientai/types/api_agent.py b/src/gradientai/types/api_agent.py similarity index 100% rename from src/do_gradientai/types/api_agent.py rename to src/gradientai/types/api_agent.py diff --git a/src/do_gradientai/types/api_agent_api_key_info.py b/src/gradientai/types/api_agent_api_key_info.py similarity index 100% rename from src/do_gradientai/types/api_agent_api_key_info.py rename to src/gradientai/types/api_agent_api_key_info.py diff --git a/src/do_gradientai/types/api_agent_model.py b/src/gradientai/types/api_agent_model.py similarity index 100% rename from src/do_gradientai/types/api_agent_model.py rename to src/gradientai/types/api_agent_model.py diff --git a/src/do_gradientai/types/api_agreement.py b/src/gradientai/types/api_agreement.py similarity index 100% rename from src/do_gradientai/types/api_agreement.py rename to src/gradientai/types/api_agreement.py diff --git a/src/do_gradientai/types/api_anthropic_api_key_info.py b/src/gradientai/types/api_anthropic_api_key_info.py similarity index 100% rename from src/do_gradientai/types/api_anthropic_api_key_info.py rename to src/gradientai/types/api_anthropic_api_key_info.py diff --git a/src/do_gradientai/types/api_deployment_visibility.py b/src/gradientai/types/api_deployment_visibility.py similarity index 100% rename from src/do_gradientai/types/api_deployment_visibility.py rename to 
src/gradientai/types/api_deployment_visibility.py diff --git a/src/do_gradientai/types/api_knowledge_base.py b/src/gradientai/types/api_knowledge_base.py similarity index 100% rename from src/do_gradientai/types/api_knowledge_base.py rename to src/gradientai/types/api_knowledge_base.py diff --git a/src/do_gradientai/types/api_model.py b/src/gradientai/types/api_model.py similarity index 100% rename from src/do_gradientai/types/api_model.py rename to src/gradientai/types/api_model.py diff --git a/src/do_gradientai/types/api_model_version.py b/src/gradientai/types/api_model_version.py similarity index 100% rename from src/do_gradientai/types/api_model_version.py rename to src/gradientai/types/api_model_version.py diff --git a/src/do_gradientai/types/api_openai_api_key_info.py b/src/gradientai/types/api_openai_api_key_info.py similarity index 100% rename from src/do_gradientai/types/api_openai_api_key_info.py rename to src/gradientai/types/api_openai_api_key_info.py diff --git a/src/do_gradientai/types/api_retrieval_method.py b/src/gradientai/types/api_retrieval_method.py similarity index 100% rename from src/do_gradientai/types/api_retrieval_method.py rename to src/gradientai/types/api_retrieval_method.py diff --git a/src/do_gradientai/types/api_workspace.py b/src/gradientai/types/api_workspace.py similarity index 100% rename from src/do_gradientai/types/api_workspace.py rename to src/gradientai/types/api_workspace.py diff --git a/src/do_gradientai/types/chat/__init__.py b/src/gradientai/types/chat/__init__.py similarity index 100% rename from src/do_gradientai/types/chat/__init__.py rename to src/gradientai/types/chat/__init__.py diff --git a/src/do_gradientai/types/chat/completion_create_params.py b/src/gradientai/types/chat/completion_create_params.py similarity index 100% rename from src/do_gradientai/types/chat/completion_create_params.py rename to src/gradientai/types/chat/completion_create_params.py diff --git 
a/src/do_gradientai/types/chat/completion_create_response.py b/src/gradientai/types/chat/completion_create_response.py similarity index 100% rename from src/do_gradientai/types/chat/completion_create_response.py rename to src/gradientai/types/chat/completion_create_response.py diff --git a/src/do_gradientai/types/domains.py b/src/gradientai/types/domains.py similarity index 100% rename from src/do_gradientai/types/domains.py rename to src/gradientai/types/domains.py diff --git a/src/do_gradientai/types/domains_param.py b/src/gradientai/types/domains_param.py similarity index 100% rename from src/do_gradientai/types/domains_param.py rename to src/gradientai/types/domains_param.py diff --git a/src/do_gradientai/types/droplet_backup_policy.py b/src/gradientai/types/droplet_backup_policy.py similarity index 100% rename from src/do_gradientai/types/droplet_backup_policy.py rename to src/gradientai/types/droplet_backup_policy.py diff --git a/src/do_gradientai/types/droplet_backup_policy_param.py b/src/gradientai/types/droplet_backup_policy_param.py similarity index 100% rename from src/do_gradientai/types/droplet_backup_policy_param.py rename to src/gradientai/types/droplet_backup_policy_param.py diff --git a/src/do_gradientai/types/droplet_create_params.py b/src/gradientai/types/droplet_create_params.py similarity index 100% rename from src/do_gradientai/types/droplet_create_params.py rename to src/gradientai/types/droplet_create_params.py diff --git a/src/do_gradientai/types/droplet_create_response.py b/src/gradientai/types/droplet_create_response.py similarity index 100% rename from src/do_gradientai/types/droplet_create_response.py rename to src/gradientai/types/droplet_create_response.py diff --git a/src/do_gradientai/types/droplet_delete_by_tag_params.py b/src/gradientai/types/droplet_delete_by_tag_params.py similarity index 100% rename from src/do_gradientai/types/droplet_delete_by_tag_params.py rename to src/gradientai/types/droplet_delete_by_tag_params.py diff 
--git a/src/do_gradientai/types/droplet_list_firewalls_params.py b/src/gradientai/types/droplet_list_firewalls_params.py similarity index 100% rename from src/do_gradientai/types/droplet_list_firewalls_params.py rename to src/gradientai/types/droplet_list_firewalls_params.py diff --git a/src/do_gradientai/types/droplet_list_firewalls_response.py b/src/gradientai/types/droplet_list_firewalls_response.py similarity index 100% rename from src/do_gradientai/types/droplet_list_firewalls_response.py rename to src/gradientai/types/droplet_list_firewalls_response.py diff --git a/src/do_gradientai/types/droplet_list_kernels_params.py b/src/gradientai/types/droplet_list_kernels_params.py similarity index 100% rename from src/do_gradientai/types/droplet_list_kernels_params.py rename to src/gradientai/types/droplet_list_kernels_params.py diff --git a/src/do_gradientai/types/droplet_list_kernels_response.py b/src/gradientai/types/droplet_list_kernels_response.py similarity index 100% rename from src/do_gradientai/types/droplet_list_kernels_response.py rename to src/gradientai/types/droplet_list_kernels_response.py diff --git a/src/do_gradientai/types/droplet_list_neighbors_response.py b/src/gradientai/types/droplet_list_neighbors_response.py similarity index 100% rename from src/do_gradientai/types/droplet_list_neighbors_response.py rename to src/gradientai/types/droplet_list_neighbors_response.py diff --git a/src/do_gradientai/types/droplet_list_params.py b/src/gradientai/types/droplet_list_params.py similarity index 100% rename from src/do_gradientai/types/droplet_list_params.py rename to src/gradientai/types/droplet_list_params.py diff --git a/src/do_gradientai/types/droplet_list_response.py b/src/gradientai/types/droplet_list_response.py similarity index 100% rename from src/do_gradientai/types/droplet_list_response.py rename to src/gradientai/types/droplet_list_response.py diff --git a/src/do_gradientai/types/droplet_list_snapshots_params.py 
b/src/gradientai/types/droplet_list_snapshots_params.py similarity index 100% rename from src/do_gradientai/types/droplet_list_snapshots_params.py rename to src/gradientai/types/droplet_list_snapshots_params.py diff --git a/src/do_gradientai/types/droplet_list_snapshots_response.py b/src/gradientai/types/droplet_list_snapshots_response.py similarity index 100% rename from src/do_gradientai/types/droplet_list_snapshots_response.py rename to src/gradientai/types/droplet_list_snapshots_response.py diff --git a/src/do_gradientai/types/droplet_retrieve_response.py b/src/gradientai/types/droplet_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/droplet_retrieve_response.py rename to src/gradientai/types/droplet_retrieve_response.py diff --git a/src/do_gradientai/types/droplets/__init__.py b/src/gradientai/types/droplets/__init__.py similarity index 100% rename from src/do_gradientai/types/droplets/__init__.py rename to src/gradientai/types/droplets/__init__.py diff --git a/src/do_gradientai/types/droplets/action_bulk_initiate_params.py b/src/gradientai/types/droplets/action_bulk_initiate_params.py similarity index 100% rename from src/do_gradientai/types/droplets/action_bulk_initiate_params.py rename to src/gradientai/types/droplets/action_bulk_initiate_params.py diff --git a/src/do_gradientai/types/droplets/action_bulk_initiate_response.py b/src/gradientai/types/droplets/action_bulk_initiate_response.py similarity index 100% rename from src/do_gradientai/types/droplets/action_bulk_initiate_response.py rename to src/gradientai/types/droplets/action_bulk_initiate_response.py diff --git a/src/do_gradientai/types/droplets/action_initiate_params.py b/src/gradientai/types/droplets/action_initiate_params.py similarity index 100% rename from src/do_gradientai/types/droplets/action_initiate_params.py rename to src/gradientai/types/droplets/action_initiate_params.py diff --git a/src/do_gradientai/types/droplets/action_initiate_response.py 
b/src/gradientai/types/droplets/action_initiate_response.py similarity index 100% rename from src/do_gradientai/types/droplets/action_initiate_response.py rename to src/gradientai/types/droplets/action_initiate_response.py diff --git a/src/do_gradientai/types/droplets/action_list_params.py b/src/gradientai/types/droplets/action_list_params.py similarity index 100% rename from src/do_gradientai/types/droplets/action_list_params.py rename to src/gradientai/types/droplets/action_list_params.py diff --git a/src/do_gradientai/types/droplets/action_list_response.py b/src/gradientai/types/droplets/action_list_response.py similarity index 100% rename from src/do_gradientai/types/droplets/action_list_response.py rename to src/gradientai/types/droplets/action_list_response.py diff --git a/src/do_gradientai/types/droplets/action_retrieve_response.py b/src/gradientai/types/droplets/action_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/droplets/action_retrieve_response.py rename to src/gradientai/types/droplets/action_retrieve_response.py diff --git a/src/do_gradientai/types/droplets/associated_resource.py b/src/gradientai/types/droplets/associated_resource.py similarity index 100% rename from src/do_gradientai/types/droplets/associated_resource.py rename to src/gradientai/types/droplets/associated_resource.py diff --git a/src/do_gradientai/types/droplets/autoscale_create_params.py b/src/gradientai/types/droplets/autoscale_create_params.py similarity index 100% rename from src/do_gradientai/types/droplets/autoscale_create_params.py rename to src/gradientai/types/droplets/autoscale_create_params.py diff --git a/src/do_gradientai/types/droplets/autoscale_create_response.py b/src/gradientai/types/droplets/autoscale_create_response.py similarity index 100% rename from src/do_gradientai/types/droplets/autoscale_create_response.py rename to src/gradientai/types/droplets/autoscale_create_response.py diff --git 
a/src/do_gradientai/types/droplets/autoscale_list_history_params.py b/src/gradientai/types/droplets/autoscale_list_history_params.py similarity index 100% rename from src/do_gradientai/types/droplets/autoscale_list_history_params.py rename to src/gradientai/types/droplets/autoscale_list_history_params.py diff --git a/src/do_gradientai/types/droplets/autoscale_list_history_response.py b/src/gradientai/types/droplets/autoscale_list_history_response.py similarity index 100% rename from src/do_gradientai/types/droplets/autoscale_list_history_response.py rename to src/gradientai/types/droplets/autoscale_list_history_response.py diff --git a/src/do_gradientai/types/droplets/autoscale_list_members_params.py b/src/gradientai/types/droplets/autoscale_list_members_params.py similarity index 100% rename from src/do_gradientai/types/droplets/autoscale_list_members_params.py rename to src/gradientai/types/droplets/autoscale_list_members_params.py diff --git a/src/do_gradientai/types/droplets/autoscale_list_members_response.py b/src/gradientai/types/droplets/autoscale_list_members_response.py similarity index 100% rename from src/do_gradientai/types/droplets/autoscale_list_members_response.py rename to src/gradientai/types/droplets/autoscale_list_members_response.py diff --git a/src/do_gradientai/types/droplets/autoscale_list_params.py b/src/gradientai/types/droplets/autoscale_list_params.py similarity index 100% rename from src/do_gradientai/types/droplets/autoscale_list_params.py rename to src/gradientai/types/droplets/autoscale_list_params.py diff --git a/src/do_gradientai/types/droplets/autoscale_list_response.py b/src/gradientai/types/droplets/autoscale_list_response.py similarity index 100% rename from src/do_gradientai/types/droplets/autoscale_list_response.py rename to src/gradientai/types/droplets/autoscale_list_response.py diff --git a/src/do_gradientai/types/droplets/autoscale_pool.py b/src/gradientai/types/droplets/autoscale_pool.py similarity index 100% rename from 
src/do_gradientai/types/droplets/autoscale_pool.py rename to src/gradientai/types/droplets/autoscale_pool.py diff --git a/src/do_gradientai/types/droplets/autoscale_pool_droplet_template.py b/src/gradientai/types/droplets/autoscale_pool_droplet_template.py similarity index 100% rename from src/do_gradientai/types/droplets/autoscale_pool_droplet_template.py rename to src/gradientai/types/droplets/autoscale_pool_droplet_template.py diff --git a/src/do_gradientai/types/droplets/autoscale_pool_droplet_template_param.py b/src/gradientai/types/droplets/autoscale_pool_droplet_template_param.py similarity index 100% rename from src/do_gradientai/types/droplets/autoscale_pool_droplet_template_param.py rename to src/gradientai/types/droplets/autoscale_pool_droplet_template_param.py diff --git a/src/do_gradientai/types/droplets/autoscale_pool_dynamic_config.py b/src/gradientai/types/droplets/autoscale_pool_dynamic_config.py similarity index 100% rename from src/do_gradientai/types/droplets/autoscale_pool_dynamic_config.py rename to src/gradientai/types/droplets/autoscale_pool_dynamic_config.py diff --git a/src/do_gradientai/types/droplets/autoscale_pool_dynamic_config_param.py b/src/gradientai/types/droplets/autoscale_pool_dynamic_config_param.py similarity index 100% rename from src/do_gradientai/types/droplets/autoscale_pool_dynamic_config_param.py rename to src/gradientai/types/droplets/autoscale_pool_dynamic_config_param.py diff --git a/src/do_gradientai/types/droplets/autoscale_pool_static_config.py b/src/gradientai/types/droplets/autoscale_pool_static_config.py similarity index 100% rename from src/do_gradientai/types/droplets/autoscale_pool_static_config.py rename to src/gradientai/types/droplets/autoscale_pool_static_config.py diff --git a/src/do_gradientai/types/droplets/autoscale_pool_static_config_param.py b/src/gradientai/types/droplets/autoscale_pool_static_config_param.py similarity index 100% rename from 
src/do_gradientai/types/droplets/autoscale_pool_static_config_param.py rename to src/gradientai/types/droplets/autoscale_pool_static_config_param.py diff --git a/src/do_gradientai/types/droplets/autoscale_retrieve_response.py b/src/gradientai/types/droplets/autoscale_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/droplets/autoscale_retrieve_response.py rename to src/gradientai/types/droplets/autoscale_retrieve_response.py diff --git a/src/do_gradientai/types/droplets/autoscale_update_params.py b/src/gradientai/types/droplets/autoscale_update_params.py similarity index 100% rename from src/do_gradientai/types/droplets/autoscale_update_params.py rename to src/gradientai/types/droplets/autoscale_update_params.py diff --git a/src/do_gradientai/types/droplets/autoscale_update_response.py b/src/gradientai/types/droplets/autoscale_update_response.py similarity index 100% rename from src/do_gradientai/types/droplets/autoscale_update_response.py rename to src/gradientai/types/droplets/autoscale_update_response.py diff --git a/src/do_gradientai/types/droplets/backup_list_params.py b/src/gradientai/types/droplets/backup_list_params.py similarity index 100% rename from src/do_gradientai/types/droplets/backup_list_params.py rename to src/gradientai/types/droplets/backup_list_params.py diff --git a/src/do_gradientai/types/droplets/backup_list_policies_params.py b/src/gradientai/types/droplets/backup_list_policies_params.py similarity index 100% rename from src/do_gradientai/types/droplets/backup_list_policies_params.py rename to src/gradientai/types/droplets/backup_list_policies_params.py diff --git a/src/do_gradientai/types/droplets/backup_list_policies_response.py b/src/gradientai/types/droplets/backup_list_policies_response.py similarity index 100% rename from src/do_gradientai/types/droplets/backup_list_policies_response.py rename to src/gradientai/types/droplets/backup_list_policies_response.py diff --git 
a/src/do_gradientai/types/droplets/backup_list_response.py b/src/gradientai/types/droplets/backup_list_response.py similarity index 100% rename from src/do_gradientai/types/droplets/backup_list_response.py rename to src/gradientai/types/droplets/backup_list_response.py diff --git a/src/do_gradientai/types/droplets/backup_list_supported_policies_response.py b/src/gradientai/types/droplets/backup_list_supported_policies_response.py similarity index 100% rename from src/do_gradientai/types/droplets/backup_list_supported_policies_response.py rename to src/gradientai/types/droplets/backup_list_supported_policies_response.py diff --git a/src/do_gradientai/types/droplets/backup_retrieve_policy_response.py b/src/gradientai/types/droplets/backup_retrieve_policy_response.py similarity index 100% rename from src/do_gradientai/types/droplets/backup_retrieve_policy_response.py rename to src/gradientai/types/droplets/backup_retrieve_policy_response.py diff --git a/src/do_gradientai/types/droplets/current_utilization.py b/src/gradientai/types/droplets/current_utilization.py similarity index 100% rename from src/do_gradientai/types/droplets/current_utilization.py rename to src/gradientai/types/droplets/current_utilization.py diff --git a/src/do_gradientai/types/droplets/destroy_with_associated_resource_check_status_response.py b/src/gradientai/types/droplets/destroy_with_associated_resource_check_status_response.py similarity index 100% rename from src/do_gradientai/types/droplets/destroy_with_associated_resource_check_status_response.py rename to src/gradientai/types/droplets/destroy_with_associated_resource_check_status_response.py diff --git a/src/do_gradientai/types/droplets/destroy_with_associated_resource_delete_selective_params.py b/src/gradientai/types/droplets/destroy_with_associated_resource_delete_selective_params.py similarity index 100% rename from src/do_gradientai/types/droplets/destroy_with_associated_resource_delete_selective_params.py rename to 
src/gradientai/types/droplets/destroy_with_associated_resource_delete_selective_params.py diff --git a/src/do_gradientai/types/droplets/destroy_with_associated_resource_list_response.py b/src/gradientai/types/droplets/destroy_with_associated_resource_list_response.py similarity index 100% rename from src/do_gradientai/types/droplets/destroy_with_associated_resource_list_response.py rename to src/gradientai/types/droplets/destroy_with_associated_resource_list_response.py diff --git a/src/do_gradientai/types/droplets/destroyed_associated_resource.py b/src/gradientai/types/droplets/destroyed_associated_resource.py similarity index 100% rename from src/do_gradientai/types/droplets/destroyed_associated_resource.py rename to src/gradientai/types/droplets/destroyed_associated_resource.py diff --git a/src/do_gradientai/types/firewall.py b/src/gradientai/types/firewall.py similarity index 100% rename from src/do_gradientai/types/firewall.py rename to src/gradientai/types/firewall.py diff --git a/src/do_gradientai/types/firewall_create_params.py b/src/gradientai/types/firewall_create_params.py similarity index 100% rename from src/do_gradientai/types/firewall_create_params.py rename to src/gradientai/types/firewall_create_params.py diff --git a/src/do_gradientai/types/firewall_create_response.py b/src/gradientai/types/firewall_create_response.py similarity index 100% rename from src/do_gradientai/types/firewall_create_response.py rename to src/gradientai/types/firewall_create_response.py diff --git a/src/do_gradientai/types/firewall_list_params.py b/src/gradientai/types/firewall_list_params.py similarity index 100% rename from src/do_gradientai/types/firewall_list_params.py rename to src/gradientai/types/firewall_list_params.py diff --git a/src/do_gradientai/types/firewall_list_response.py b/src/gradientai/types/firewall_list_response.py similarity index 100% rename from src/do_gradientai/types/firewall_list_response.py rename to 
src/gradientai/types/firewall_list_response.py diff --git a/src/do_gradientai/types/firewall_param.py b/src/gradientai/types/firewall_param.py similarity index 100% rename from src/do_gradientai/types/firewall_param.py rename to src/gradientai/types/firewall_param.py diff --git a/src/do_gradientai/types/firewall_retrieve_response.py b/src/gradientai/types/firewall_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/firewall_retrieve_response.py rename to src/gradientai/types/firewall_retrieve_response.py diff --git a/src/do_gradientai/types/firewall_update_params.py b/src/gradientai/types/firewall_update_params.py similarity index 100% rename from src/do_gradientai/types/firewall_update_params.py rename to src/gradientai/types/firewall_update_params.py diff --git a/src/do_gradientai/types/firewall_update_response.py b/src/gradientai/types/firewall_update_response.py similarity index 100% rename from src/do_gradientai/types/firewall_update_response.py rename to src/gradientai/types/firewall_update_response.py diff --git a/src/do_gradientai/types/firewalls/__init__.py b/src/gradientai/types/firewalls/__init__.py similarity index 100% rename from src/do_gradientai/types/firewalls/__init__.py rename to src/gradientai/types/firewalls/__init__.py diff --git a/src/do_gradientai/types/firewalls/droplet_add_params.py b/src/gradientai/types/firewalls/droplet_add_params.py similarity index 100% rename from src/do_gradientai/types/firewalls/droplet_add_params.py rename to src/gradientai/types/firewalls/droplet_add_params.py diff --git a/src/do_gradientai/types/firewalls/droplet_remove_params.py b/src/gradientai/types/firewalls/droplet_remove_params.py similarity index 100% rename from src/do_gradientai/types/firewalls/droplet_remove_params.py rename to src/gradientai/types/firewalls/droplet_remove_params.py diff --git a/src/do_gradientai/types/firewalls/rule_add_params.py b/src/gradientai/types/firewalls/rule_add_params.py similarity index 100% 
rename from src/do_gradientai/types/firewalls/rule_add_params.py rename to src/gradientai/types/firewalls/rule_add_params.py diff --git a/src/do_gradientai/types/firewalls/rule_remove_params.py b/src/gradientai/types/firewalls/rule_remove_params.py similarity index 100% rename from src/do_gradientai/types/firewalls/rule_remove_params.py rename to src/gradientai/types/firewalls/rule_remove_params.py diff --git a/src/do_gradientai/types/firewalls/tag_add_params.py b/src/gradientai/types/firewalls/tag_add_params.py similarity index 100% rename from src/do_gradientai/types/firewalls/tag_add_params.py rename to src/gradientai/types/firewalls/tag_add_params.py diff --git a/src/do_gradientai/types/firewalls/tag_remove_params.py b/src/gradientai/types/firewalls/tag_remove_params.py similarity index 100% rename from src/do_gradientai/types/firewalls/tag_remove_params.py rename to src/gradientai/types/firewalls/tag_remove_params.py diff --git a/src/do_gradientai/types/floating_ip.py b/src/gradientai/types/floating_ip.py similarity index 100% rename from src/do_gradientai/types/floating_ip.py rename to src/gradientai/types/floating_ip.py diff --git a/src/do_gradientai/types/floating_ip_create_params.py b/src/gradientai/types/floating_ip_create_params.py similarity index 100% rename from src/do_gradientai/types/floating_ip_create_params.py rename to src/gradientai/types/floating_ip_create_params.py diff --git a/src/do_gradientai/types/floating_ip_create_response.py b/src/gradientai/types/floating_ip_create_response.py similarity index 100% rename from src/do_gradientai/types/floating_ip_create_response.py rename to src/gradientai/types/floating_ip_create_response.py diff --git a/src/do_gradientai/types/floating_ip_list_params.py b/src/gradientai/types/floating_ip_list_params.py similarity index 100% rename from src/do_gradientai/types/floating_ip_list_params.py rename to src/gradientai/types/floating_ip_list_params.py diff --git 
a/src/do_gradientai/types/floating_ip_list_response.py b/src/gradientai/types/floating_ip_list_response.py similarity index 100% rename from src/do_gradientai/types/floating_ip_list_response.py rename to src/gradientai/types/floating_ip_list_response.py diff --git a/src/do_gradientai/types/floating_ip_retrieve_response.py b/src/gradientai/types/floating_ip_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/floating_ip_retrieve_response.py rename to src/gradientai/types/floating_ip_retrieve_response.py diff --git a/src/do_gradientai/types/floating_ips/__init__.py b/src/gradientai/types/floating_ips/__init__.py similarity index 100% rename from src/do_gradientai/types/floating_ips/__init__.py rename to src/gradientai/types/floating_ips/__init__.py diff --git a/src/do_gradientai/types/floating_ips/action_create_params.py b/src/gradientai/types/floating_ips/action_create_params.py similarity index 100% rename from src/do_gradientai/types/floating_ips/action_create_params.py rename to src/gradientai/types/floating_ips/action_create_params.py diff --git a/src/do_gradientai/types/floating_ips/action_create_response.py b/src/gradientai/types/floating_ips/action_create_response.py similarity index 100% rename from src/do_gradientai/types/floating_ips/action_create_response.py rename to src/gradientai/types/floating_ips/action_create_response.py diff --git a/src/do_gradientai/types/floating_ips/action_list_response.py b/src/gradientai/types/floating_ips/action_list_response.py similarity index 100% rename from src/do_gradientai/types/floating_ips/action_list_response.py rename to src/gradientai/types/floating_ips/action_list_response.py diff --git a/src/do_gradientai/types/floating_ips/action_retrieve_response.py b/src/gradientai/types/floating_ips/action_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/floating_ips/action_retrieve_response.py rename to src/gradientai/types/floating_ips/action_retrieve_response.py 
diff --git a/src/do_gradientai/types/forwarding_rule.py b/src/gradientai/types/forwarding_rule.py similarity index 100% rename from src/do_gradientai/types/forwarding_rule.py rename to src/gradientai/types/forwarding_rule.py diff --git a/src/do_gradientai/types/forwarding_rule_param.py b/src/gradientai/types/forwarding_rule_param.py similarity index 100% rename from src/do_gradientai/types/forwarding_rule_param.py rename to src/gradientai/types/forwarding_rule_param.py diff --git a/src/do_gradientai/types/glb_settings.py b/src/gradientai/types/glb_settings.py similarity index 100% rename from src/do_gradientai/types/glb_settings.py rename to src/gradientai/types/glb_settings.py diff --git a/src/do_gradientai/types/glb_settings_param.py b/src/gradientai/types/glb_settings_param.py similarity index 100% rename from src/do_gradientai/types/glb_settings_param.py rename to src/gradientai/types/glb_settings_param.py diff --git a/src/do_gradientai/types/health_check.py b/src/gradientai/types/health_check.py similarity index 100% rename from src/do_gradientai/types/health_check.py rename to src/gradientai/types/health_check.py diff --git a/src/do_gradientai/types/health_check_param.py b/src/gradientai/types/health_check_param.py similarity index 100% rename from src/do_gradientai/types/health_check_param.py rename to src/gradientai/types/health_check_param.py diff --git a/src/do_gradientai/types/image_create_params.py b/src/gradientai/types/image_create_params.py similarity index 100% rename from src/do_gradientai/types/image_create_params.py rename to src/gradientai/types/image_create_params.py diff --git a/src/do_gradientai/types/image_create_response.py b/src/gradientai/types/image_create_response.py similarity index 100% rename from src/do_gradientai/types/image_create_response.py rename to src/gradientai/types/image_create_response.py diff --git a/src/do_gradientai/types/image_list_params.py b/src/gradientai/types/image_list_params.py similarity index 100% rename from 
src/do_gradientai/types/image_list_params.py rename to src/gradientai/types/image_list_params.py diff --git a/src/do_gradientai/types/image_list_response.py b/src/gradientai/types/image_list_response.py similarity index 100% rename from src/do_gradientai/types/image_list_response.py rename to src/gradientai/types/image_list_response.py diff --git a/src/do_gradientai/types/image_retrieve_response.py b/src/gradientai/types/image_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/image_retrieve_response.py rename to src/gradientai/types/image_retrieve_response.py diff --git a/src/do_gradientai/types/image_update_params.py b/src/gradientai/types/image_update_params.py similarity index 100% rename from src/do_gradientai/types/image_update_params.py rename to src/gradientai/types/image_update_params.py diff --git a/src/do_gradientai/types/image_update_response.py b/src/gradientai/types/image_update_response.py similarity index 100% rename from src/do_gradientai/types/image_update_response.py rename to src/gradientai/types/image_update_response.py diff --git a/src/do_gradientai/types/images/__init__.py b/src/gradientai/types/images/__init__.py similarity index 100% rename from src/do_gradientai/types/images/__init__.py rename to src/gradientai/types/images/__init__.py diff --git a/src/do_gradientai/types/images/action_create_params.py b/src/gradientai/types/images/action_create_params.py similarity index 100% rename from src/do_gradientai/types/images/action_create_params.py rename to src/gradientai/types/images/action_create_params.py diff --git a/src/do_gradientai/types/images/action_list_response.py b/src/gradientai/types/images/action_list_response.py similarity index 100% rename from src/do_gradientai/types/images/action_list_response.py rename to src/gradientai/types/images/action_list_response.py diff --git a/src/do_gradientai/types/inference/__init__.py b/src/gradientai/types/inference/__init__.py similarity index 100% rename from 
src/do_gradientai/types/inference/__init__.py rename to src/gradientai/types/inference/__init__.py diff --git a/src/do_gradientai/types/inference/api_key_create_params.py b/src/gradientai/types/inference/api_key_create_params.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_create_params.py rename to src/gradientai/types/inference/api_key_create_params.py diff --git a/src/do_gradientai/types/inference/api_key_create_response.py b/src/gradientai/types/inference/api_key_create_response.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_create_response.py rename to src/gradientai/types/inference/api_key_create_response.py diff --git a/src/do_gradientai/types/inference/api_key_delete_response.py b/src/gradientai/types/inference/api_key_delete_response.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_delete_response.py rename to src/gradientai/types/inference/api_key_delete_response.py diff --git a/src/do_gradientai/types/inference/api_key_list_params.py b/src/gradientai/types/inference/api_key_list_params.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_list_params.py rename to src/gradientai/types/inference/api_key_list_params.py diff --git a/src/do_gradientai/types/inference/api_key_list_response.py b/src/gradientai/types/inference/api_key_list_response.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_list_response.py rename to src/gradientai/types/inference/api_key_list_response.py diff --git a/src/do_gradientai/types/inference/api_key_update_params.py b/src/gradientai/types/inference/api_key_update_params.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_update_params.py rename to src/gradientai/types/inference/api_key_update_params.py diff --git a/src/do_gradientai/types/inference/api_key_update_regenerate_response.py b/src/gradientai/types/inference/api_key_update_regenerate_response.py 
similarity index 100% rename from src/do_gradientai/types/inference/api_key_update_regenerate_response.py rename to src/gradientai/types/inference/api_key_update_regenerate_response.py diff --git a/src/do_gradientai/types/inference/api_key_update_response.py b/src/gradientai/types/inference/api_key_update_response.py similarity index 100% rename from src/do_gradientai/types/inference/api_key_update_response.py rename to src/gradientai/types/inference/api_key_update_response.py diff --git a/src/do_gradientai/types/inference/api_model_api_key_info.py b/src/gradientai/types/inference/api_model_api_key_info.py similarity index 100% rename from src/do_gradientai/types/inference/api_model_api_key_info.py rename to src/gradientai/types/inference/api_model_api_key_info.py diff --git a/src/do_gradientai/types/knowledge_base_create_params.py b/src/gradientai/types/knowledge_base_create_params.py similarity index 100% rename from src/do_gradientai/types/knowledge_base_create_params.py rename to src/gradientai/types/knowledge_base_create_params.py diff --git a/src/do_gradientai/types/knowledge_base_create_response.py b/src/gradientai/types/knowledge_base_create_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_base_create_response.py rename to src/gradientai/types/knowledge_base_create_response.py diff --git a/src/do_gradientai/types/knowledge_base_delete_response.py b/src/gradientai/types/knowledge_base_delete_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_base_delete_response.py rename to src/gradientai/types/knowledge_base_delete_response.py diff --git a/src/do_gradientai/types/knowledge_base_list_params.py b/src/gradientai/types/knowledge_base_list_params.py similarity index 100% rename from src/do_gradientai/types/knowledge_base_list_params.py rename to src/gradientai/types/knowledge_base_list_params.py diff --git a/src/do_gradientai/types/knowledge_base_list_response.py 
b/src/gradientai/types/knowledge_base_list_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_base_list_response.py rename to src/gradientai/types/knowledge_base_list_response.py diff --git a/src/do_gradientai/types/knowledge_base_retrieve_response.py b/src/gradientai/types/knowledge_base_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_base_retrieve_response.py rename to src/gradientai/types/knowledge_base_retrieve_response.py diff --git a/src/do_gradientai/types/knowledge_base_update_params.py b/src/gradientai/types/knowledge_base_update_params.py similarity index 100% rename from src/do_gradientai/types/knowledge_base_update_params.py rename to src/gradientai/types/knowledge_base_update_params.py diff --git a/src/do_gradientai/types/knowledge_base_update_response.py b/src/gradientai/types/knowledge_base_update_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_base_update_response.py rename to src/gradientai/types/knowledge_base_update_response.py diff --git a/src/do_gradientai/types/knowledge_bases/__init__.py b/src/gradientai/types/knowledge_bases/__init__.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/__init__.py rename to src/gradientai/types/knowledge_bases/__init__.py diff --git a/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py b/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py rename to src/gradientai/types/knowledge_bases/api_file_upload_data_source.py diff --git a/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py b/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py rename to 
src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py diff --git a/src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py b/src/gradientai/types/knowledge_bases/api_indexed_data_source.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py rename to src/gradientai/types/knowledge_bases/api_indexed_data_source.py diff --git a/src/do_gradientai/types/knowledge_bases/api_indexing_job.py b/src/gradientai/types/knowledge_bases/api_indexing_job.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_indexing_job.py rename to src/gradientai/types/knowledge_bases/api_indexing_job.py diff --git a/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py rename to src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py diff --git a/src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py b/src/gradientai/types/knowledge_bases/api_spaces_data_source.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py rename to src/gradientai/types/knowledge_bases/api_spaces_data_source.py diff --git a/src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py b/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py rename to src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py diff --git a/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py b/src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py rename to 
src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py diff --git a/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py b/src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py rename to src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py diff --git a/src/do_gradientai/types/knowledge_bases/aws_data_source_param.py b/src/gradientai/types/knowledge_bases/aws_data_source_param.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/aws_data_source_param.py rename to src/gradientai/types/knowledge_bases/aws_data_source_param.py diff --git a/src/do_gradientai/types/knowledge_bases/data_source_create_params.py b/src/gradientai/types/knowledge_bases/data_source_create_params.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/data_source_create_params.py rename to src/gradientai/types/knowledge_bases/data_source_create_params.py diff --git a/src/do_gradientai/types/knowledge_bases/data_source_create_response.py b/src/gradientai/types/knowledge_bases/data_source_create_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/data_source_create_response.py rename to src/gradientai/types/knowledge_bases/data_source_create_response.py diff --git a/src/do_gradientai/types/knowledge_bases/data_source_delete_response.py b/src/gradientai/types/knowledge_bases/data_source_delete_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/data_source_delete_response.py rename to src/gradientai/types/knowledge_bases/data_source_delete_response.py diff --git a/src/do_gradientai/types/knowledge_bases/data_source_list_params.py b/src/gradientai/types/knowledge_bases/data_source_list_params.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/data_source_list_params.py rename 
to src/gradientai/types/knowledge_bases/data_source_list_params.py diff --git a/src/do_gradientai/types/knowledge_bases/data_source_list_response.py b/src/gradientai/types/knowledge_bases/data_source_list_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/data_source_list_response.py rename to src/gradientai/types/knowledge_bases/data_source_list_response.py diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py b/src/gradientai/types/knowledge_bases/indexing_job_create_params.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py rename to src/gradientai/types/knowledge_bases/indexing_job_create_params.py diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py b/src/gradientai/types/knowledge_bases/indexing_job_create_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_create_response.py diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py b/src/gradientai/types/knowledge_bases/indexing_job_list_params.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py rename to src/gradientai/types/knowledge_bases/indexing_job_list_params.py diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py b/src/gradientai/types/knowledge_bases/indexing_job_list_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_list_response.py diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py similarity index 100% rename from 
src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py rename to src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py diff --git a/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py similarity index 100% rename from src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py rename to src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py diff --git a/src/do_gradientai/types/lb_firewall.py b/src/gradientai/types/lb_firewall.py similarity index 100% rename from src/do_gradientai/types/lb_firewall.py rename to src/gradientai/types/lb_firewall.py diff --git a/src/do_gradientai/types/lb_firewall_param.py b/src/gradientai/types/lb_firewall_param.py similarity index 100% rename from src/do_gradientai/types/lb_firewall_param.py rename to src/gradientai/types/lb_firewall_param.py diff --git a/src/do_gradientai/types/load_balancer.py b/src/gradientai/types/load_balancer.py similarity index 100% rename from src/do_gradientai/types/load_balancer.py rename to src/gradientai/types/load_balancer.py diff --git a/src/do_gradientai/types/load_balancer_create_params.py 
b/src/gradientai/types/load_balancer_create_params.py similarity index 100% rename from src/do_gradientai/types/load_balancer_create_params.py rename to src/gradientai/types/load_balancer_create_params.py diff --git a/src/do_gradientai/types/load_balancer_create_response.py b/src/gradientai/types/load_balancer_create_response.py similarity index 100% rename from src/do_gradientai/types/load_balancer_create_response.py rename to src/gradientai/types/load_balancer_create_response.py diff --git a/src/do_gradientai/types/load_balancer_list_params.py b/src/gradientai/types/load_balancer_list_params.py similarity index 100% rename from src/do_gradientai/types/load_balancer_list_params.py rename to src/gradientai/types/load_balancer_list_params.py diff --git a/src/do_gradientai/types/load_balancer_list_response.py b/src/gradientai/types/load_balancer_list_response.py similarity index 100% rename from src/do_gradientai/types/load_balancer_list_response.py rename to src/gradientai/types/load_balancer_list_response.py diff --git a/src/do_gradientai/types/load_balancer_retrieve_response.py b/src/gradientai/types/load_balancer_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/load_balancer_retrieve_response.py rename to src/gradientai/types/load_balancer_retrieve_response.py diff --git a/src/do_gradientai/types/load_balancer_update_params.py b/src/gradientai/types/load_balancer_update_params.py similarity index 100% rename from src/do_gradientai/types/load_balancer_update_params.py rename to src/gradientai/types/load_balancer_update_params.py diff --git a/src/do_gradientai/types/load_balancer_update_response.py b/src/gradientai/types/load_balancer_update_response.py similarity index 100% rename from src/do_gradientai/types/load_balancer_update_response.py rename to src/gradientai/types/load_balancer_update_response.py diff --git a/src/do_gradientai/types/load_balancers/__init__.py b/src/gradientai/types/load_balancers/__init__.py similarity index 
100% rename from src/do_gradientai/types/load_balancers/__init__.py rename to src/gradientai/types/load_balancers/__init__.py diff --git a/src/do_gradientai/types/load_balancers/droplet_add_params.py b/src/gradientai/types/load_balancers/droplet_add_params.py similarity index 100% rename from src/do_gradientai/types/load_balancers/droplet_add_params.py rename to src/gradientai/types/load_balancers/droplet_add_params.py diff --git a/src/do_gradientai/types/load_balancers/droplet_remove_params.py b/src/gradientai/types/load_balancers/droplet_remove_params.py similarity index 100% rename from src/do_gradientai/types/load_balancers/droplet_remove_params.py rename to src/gradientai/types/load_balancers/droplet_remove_params.py diff --git a/src/do_gradientai/types/load_balancers/forwarding_rule_add_params.py b/src/gradientai/types/load_balancers/forwarding_rule_add_params.py similarity index 100% rename from src/do_gradientai/types/load_balancers/forwarding_rule_add_params.py rename to src/gradientai/types/load_balancers/forwarding_rule_add_params.py diff --git a/src/do_gradientai/types/load_balancers/forwarding_rule_remove_params.py b/src/gradientai/types/load_balancers/forwarding_rule_remove_params.py similarity index 100% rename from src/do_gradientai/types/load_balancers/forwarding_rule_remove_params.py rename to src/gradientai/types/load_balancers/forwarding_rule_remove_params.py diff --git a/src/do_gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py similarity index 100% rename from src/do_gradientai/types/model_list_response.py rename to src/gradientai/types/model_list_response.py diff --git a/src/do_gradientai/types/model_retrieve_response.py b/src/gradientai/types/model_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/model_retrieve_response.py rename to src/gradientai/types/model_retrieve_response.py diff --git a/src/do_gradientai/types/models/__init__.py b/src/gradientai/types/models/__init__.py 
similarity index 100% rename from src/do_gradientai/types/models/__init__.py rename to src/gradientai/types/models/__init__.py diff --git a/src/do_gradientai/types/models/providers/__init__.py b/src/gradientai/types/models/providers/__init__.py similarity index 100% rename from src/do_gradientai/types/models/providers/__init__.py rename to src/gradientai/types/models/providers/__init__.py diff --git a/src/do_gradientai/types/models/providers/anthropic_create_params.py b/src/gradientai/types/models/providers/anthropic_create_params.py similarity index 100% rename from src/do_gradientai/types/models/providers/anthropic_create_params.py rename to src/gradientai/types/models/providers/anthropic_create_params.py diff --git a/src/do_gradientai/types/models/providers/anthropic_create_response.py b/src/gradientai/types/models/providers/anthropic_create_response.py similarity index 100% rename from src/do_gradientai/types/models/providers/anthropic_create_response.py rename to src/gradientai/types/models/providers/anthropic_create_response.py diff --git a/src/do_gradientai/types/models/providers/anthropic_delete_response.py b/src/gradientai/types/models/providers/anthropic_delete_response.py similarity index 100% rename from src/do_gradientai/types/models/providers/anthropic_delete_response.py rename to src/gradientai/types/models/providers/anthropic_delete_response.py diff --git a/src/do_gradientai/types/models/providers/anthropic_list_agents_params.py b/src/gradientai/types/models/providers/anthropic_list_agents_params.py similarity index 100% rename from src/do_gradientai/types/models/providers/anthropic_list_agents_params.py rename to src/gradientai/types/models/providers/anthropic_list_agents_params.py diff --git a/src/do_gradientai/types/models/providers/anthropic_list_agents_response.py b/src/gradientai/types/models/providers/anthropic_list_agents_response.py similarity index 100% rename from src/do_gradientai/types/models/providers/anthropic_list_agents_response.py 
rename to src/gradientai/types/models/providers/anthropic_list_agents_response.py diff --git a/src/do_gradientai/types/models/providers/anthropic_list_params.py b/src/gradientai/types/models/providers/anthropic_list_params.py similarity index 100% rename from src/do_gradientai/types/models/providers/anthropic_list_params.py rename to src/gradientai/types/models/providers/anthropic_list_params.py diff --git a/src/do_gradientai/types/models/providers/anthropic_list_response.py b/src/gradientai/types/models/providers/anthropic_list_response.py similarity index 100% rename from src/do_gradientai/types/models/providers/anthropic_list_response.py rename to src/gradientai/types/models/providers/anthropic_list_response.py diff --git a/src/do_gradientai/types/models/providers/anthropic_retrieve_response.py b/src/gradientai/types/models/providers/anthropic_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/models/providers/anthropic_retrieve_response.py rename to src/gradientai/types/models/providers/anthropic_retrieve_response.py diff --git a/src/do_gradientai/types/models/providers/anthropic_update_params.py b/src/gradientai/types/models/providers/anthropic_update_params.py similarity index 100% rename from src/do_gradientai/types/models/providers/anthropic_update_params.py rename to src/gradientai/types/models/providers/anthropic_update_params.py diff --git a/src/do_gradientai/types/models/providers/anthropic_update_response.py b/src/gradientai/types/models/providers/anthropic_update_response.py similarity index 100% rename from src/do_gradientai/types/models/providers/anthropic_update_response.py rename to src/gradientai/types/models/providers/anthropic_update_response.py diff --git a/src/do_gradientai/types/models/providers/openai_create_params.py b/src/gradientai/types/models/providers/openai_create_params.py similarity index 100% rename from src/do_gradientai/types/models/providers/openai_create_params.py rename to 
src/gradientai/types/models/providers/openai_create_params.py diff --git a/src/do_gradientai/types/models/providers/openai_create_response.py b/src/gradientai/types/models/providers/openai_create_response.py similarity index 100% rename from src/do_gradientai/types/models/providers/openai_create_response.py rename to src/gradientai/types/models/providers/openai_create_response.py diff --git a/src/do_gradientai/types/models/providers/openai_delete_response.py b/src/gradientai/types/models/providers/openai_delete_response.py similarity index 100% rename from src/do_gradientai/types/models/providers/openai_delete_response.py rename to src/gradientai/types/models/providers/openai_delete_response.py diff --git a/src/do_gradientai/types/models/providers/openai_list_params.py b/src/gradientai/types/models/providers/openai_list_params.py similarity index 100% rename from src/do_gradientai/types/models/providers/openai_list_params.py rename to src/gradientai/types/models/providers/openai_list_params.py diff --git a/src/do_gradientai/types/models/providers/openai_list_response.py b/src/gradientai/types/models/providers/openai_list_response.py similarity index 100% rename from src/do_gradientai/types/models/providers/openai_list_response.py rename to src/gradientai/types/models/providers/openai_list_response.py diff --git a/src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py b/src/gradientai/types/models/providers/openai_retrieve_agents_params.py similarity index 100% rename from src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py rename to src/gradientai/types/models/providers/openai_retrieve_agents_params.py diff --git a/src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py b/src/gradientai/types/models/providers/openai_retrieve_agents_response.py similarity index 100% rename from src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py rename to 
src/gradientai/types/models/providers/openai_retrieve_agents_response.py diff --git a/src/do_gradientai/types/models/providers/openai_retrieve_response.py b/src/gradientai/types/models/providers/openai_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/models/providers/openai_retrieve_response.py rename to src/gradientai/types/models/providers/openai_retrieve_response.py diff --git a/src/do_gradientai/types/models/providers/openai_update_params.py b/src/gradientai/types/models/providers/openai_update_params.py similarity index 100% rename from src/do_gradientai/types/models/providers/openai_update_params.py rename to src/gradientai/types/models/providers/openai_update_params.py diff --git a/src/do_gradientai/types/models/providers/openai_update_response.py b/src/gradientai/types/models/providers/openai_update_response.py similarity index 100% rename from src/do_gradientai/types/models/providers/openai_update_response.py rename to src/gradientai/types/models/providers/openai_update_response.py diff --git a/src/do_gradientai/types/region_list_params.py b/src/gradientai/types/region_list_params.py similarity index 100% rename from src/do_gradientai/types/region_list_params.py rename to src/gradientai/types/region_list_params.py diff --git a/src/do_gradientai/types/region_list_response.py b/src/gradientai/types/region_list_response.py similarity index 100% rename from src/do_gradientai/types/region_list_response.py rename to src/gradientai/types/region_list_response.py diff --git a/src/do_gradientai/types/shared/__init__.py b/src/gradientai/types/shared/__init__.py similarity index 100% rename from src/do_gradientai/types/shared/__init__.py rename to src/gradientai/types/shared/__init__.py diff --git a/src/do_gradientai/types/shared/action.py b/src/gradientai/types/shared/action.py similarity index 100% rename from src/do_gradientai/types/shared/action.py rename to src/gradientai/types/shared/action.py diff --git 
a/src/do_gradientai/types/shared/action_link.py b/src/gradientai/types/shared/action_link.py similarity index 100% rename from src/do_gradientai/types/shared/action_link.py rename to src/gradientai/types/shared/action_link.py diff --git a/src/do_gradientai/types/shared/api_links.py b/src/gradientai/types/shared/api_links.py similarity index 100% rename from src/do_gradientai/types/shared/api_links.py rename to src/gradientai/types/shared/api_links.py diff --git a/src/do_gradientai/types/shared/api_meta.py b/src/gradientai/types/shared/api_meta.py similarity index 100% rename from src/do_gradientai/types/shared/api_meta.py rename to src/gradientai/types/shared/api_meta.py diff --git a/src/do_gradientai/types/shared/backward_links.py b/src/gradientai/types/shared/backward_links.py similarity index 100% rename from src/do_gradientai/types/shared/backward_links.py rename to src/gradientai/types/shared/backward_links.py diff --git a/src/do_gradientai/types/shared/chat_completion_chunk.py b/src/gradientai/types/shared/chat_completion_chunk.py similarity index 100% rename from src/do_gradientai/types/shared/chat_completion_chunk.py rename to src/gradientai/types/shared/chat_completion_chunk.py diff --git a/src/do_gradientai/types/shared/chat_completion_token_logprob.py b/src/gradientai/types/shared/chat_completion_token_logprob.py similarity index 100% rename from src/do_gradientai/types/shared/chat_completion_token_logprob.py rename to src/gradientai/types/shared/chat_completion_token_logprob.py diff --git a/src/do_gradientai/types/shared/completion_usage.py b/src/gradientai/types/shared/completion_usage.py similarity index 100% rename from src/do_gradientai/types/shared/completion_usage.py rename to src/gradientai/types/shared/completion_usage.py diff --git a/src/do_gradientai/types/shared/disk_info.py b/src/gradientai/types/shared/disk_info.py similarity index 100% rename from src/do_gradientai/types/shared/disk_info.py rename to 
src/gradientai/types/shared/disk_info.py diff --git a/src/do_gradientai/types/shared/droplet.py b/src/gradientai/types/shared/droplet.py similarity index 100% rename from src/do_gradientai/types/shared/droplet.py rename to src/gradientai/types/shared/droplet.py diff --git a/src/do_gradientai/types/shared/droplet_next_backup_window.py b/src/gradientai/types/shared/droplet_next_backup_window.py similarity index 100% rename from src/do_gradientai/types/shared/droplet_next_backup_window.py rename to src/gradientai/types/shared/droplet_next_backup_window.py diff --git a/src/do_gradientai/types/shared/firewall_rule_target.py b/src/gradientai/types/shared/firewall_rule_target.py similarity index 100% rename from src/do_gradientai/types/shared/firewall_rule_target.py rename to src/gradientai/types/shared/firewall_rule_target.py diff --git a/src/do_gradientai/types/shared/forward_links.py b/src/gradientai/types/shared/forward_links.py similarity index 100% rename from src/do_gradientai/types/shared/forward_links.py rename to src/gradientai/types/shared/forward_links.py diff --git a/src/do_gradientai/types/shared/garbage_collection.py b/src/gradientai/types/shared/garbage_collection.py similarity index 100% rename from src/do_gradientai/types/shared/garbage_collection.py rename to src/gradientai/types/shared/garbage_collection.py diff --git a/src/do_gradientai/types/shared/gpu_info.py b/src/gradientai/types/shared/gpu_info.py similarity index 100% rename from src/do_gradientai/types/shared/gpu_info.py rename to src/gradientai/types/shared/gpu_info.py diff --git a/src/do_gradientai/types/shared/image.py b/src/gradientai/types/shared/image.py similarity index 100% rename from src/do_gradientai/types/shared/image.py rename to src/gradientai/types/shared/image.py diff --git a/src/do_gradientai/types/shared/kernel.py b/src/gradientai/types/shared/kernel.py similarity index 100% rename from src/do_gradientai/types/shared/kernel.py rename to src/gradientai/types/shared/kernel.py 
diff --git a/src/do_gradientai/types/shared/meta_properties.py b/src/gradientai/types/shared/meta_properties.py similarity index 100% rename from src/do_gradientai/types/shared/meta_properties.py rename to src/gradientai/types/shared/meta_properties.py diff --git a/src/do_gradientai/types/shared/network_v4.py b/src/gradientai/types/shared/network_v4.py similarity index 100% rename from src/do_gradientai/types/shared/network_v4.py rename to src/gradientai/types/shared/network_v4.py diff --git a/src/do_gradientai/types/shared/network_v6.py b/src/gradientai/types/shared/network_v6.py similarity index 100% rename from src/do_gradientai/types/shared/network_v6.py rename to src/gradientai/types/shared/network_v6.py diff --git a/src/do_gradientai/types/shared/page_links.py b/src/gradientai/types/shared/page_links.py similarity index 100% rename from src/do_gradientai/types/shared/page_links.py rename to src/gradientai/types/shared/page_links.py diff --git a/src/do_gradientai/types/shared/region.py b/src/gradientai/types/shared/region.py similarity index 100% rename from src/do_gradientai/types/shared/region.py rename to src/gradientai/types/shared/region.py diff --git a/src/do_gradientai/types/shared/repository_blob.py b/src/gradientai/types/shared/repository_blob.py similarity index 100% rename from src/do_gradientai/types/shared/repository_blob.py rename to src/gradientai/types/shared/repository_blob.py diff --git a/src/do_gradientai/types/shared/repository_manifest.py b/src/gradientai/types/shared/repository_manifest.py similarity index 100% rename from src/do_gradientai/types/shared/repository_manifest.py rename to src/gradientai/types/shared/repository_manifest.py diff --git a/src/do_gradientai/types/shared/repository_tag.py b/src/gradientai/types/shared/repository_tag.py similarity index 100% rename from src/do_gradientai/types/shared/repository_tag.py rename to src/gradientai/types/shared/repository_tag.py diff --git a/src/do_gradientai/types/shared/size.py 
b/src/gradientai/types/shared/size.py similarity index 100% rename from src/do_gradientai/types/shared/size.py rename to src/gradientai/types/shared/size.py diff --git a/src/do_gradientai/types/shared/snapshots.py b/src/gradientai/types/shared/snapshots.py similarity index 100% rename from src/do_gradientai/types/shared/snapshots.py rename to src/gradientai/types/shared/snapshots.py diff --git a/src/do_gradientai/types/shared/subscription.py b/src/gradientai/types/shared/subscription.py similarity index 100% rename from src/do_gradientai/types/shared/subscription.py rename to src/gradientai/types/shared/subscription.py diff --git a/src/do_gradientai/types/shared/subscription_tier_base.py b/src/gradientai/types/shared/subscription_tier_base.py similarity index 100% rename from src/do_gradientai/types/shared/subscription_tier_base.py rename to src/gradientai/types/shared/subscription_tier_base.py diff --git a/src/do_gradientai/types/shared/vpc_peering.py b/src/gradientai/types/shared/vpc_peering.py similarity index 100% rename from src/do_gradientai/types/shared/vpc_peering.py rename to src/gradientai/types/shared/vpc_peering.py diff --git a/src/do_gradientai/types/shared_params/__init__.py b/src/gradientai/types/shared_params/__init__.py similarity index 100% rename from src/do_gradientai/types/shared_params/__init__.py rename to src/gradientai/types/shared_params/__init__.py diff --git a/src/do_gradientai/types/shared_params/firewall_rule_target.py b/src/gradientai/types/shared_params/firewall_rule_target.py similarity index 100% rename from src/do_gradientai/types/shared_params/firewall_rule_target.py rename to src/gradientai/types/shared_params/firewall_rule_target.py diff --git a/src/do_gradientai/types/size_list_params.py b/src/gradientai/types/size_list_params.py similarity index 100% rename from src/do_gradientai/types/size_list_params.py rename to src/gradientai/types/size_list_params.py diff --git a/src/do_gradientai/types/size_list_response.py 
b/src/gradientai/types/size_list_response.py similarity index 100% rename from src/do_gradientai/types/size_list_response.py rename to src/gradientai/types/size_list_response.py diff --git a/src/do_gradientai/types/snapshot_list_params.py b/src/gradientai/types/snapshot_list_params.py similarity index 100% rename from src/do_gradientai/types/snapshot_list_params.py rename to src/gradientai/types/snapshot_list_params.py diff --git a/src/do_gradientai/types/snapshot_list_response.py b/src/gradientai/types/snapshot_list_response.py similarity index 100% rename from src/do_gradientai/types/snapshot_list_response.py rename to src/gradientai/types/snapshot_list_response.py diff --git a/src/do_gradientai/types/snapshot_retrieve_response.py b/src/gradientai/types/snapshot_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/snapshot_retrieve_response.py rename to src/gradientai/types/snapshot_retrieve_response.py diff --git a/src/do_gradientai/types/sticky_sessions.py b/src/gradientai/types/sticky_sessions.py similarity index 100% rename from src/do_gradientai/types/sticky_sessions.py rename to src/gradientai/types/sticky_sessions.py diff --git a/src/do_gradientai/types/sticky_sessions_param.py b/src/gradientai/types/sticky_sessions_param.py similarity index 100% rename from src/do_gradientai/types/sticky_sessions_param.py rename to src/gradientai/types/sticky_sessions_param.py diff --git a/src/do_gradientai/types/volume_create_params.py b/src/gradientai/types/volume_create_params.py similarity index 100% rename from src/do_gradientai/types/volume_create_params.py rename to src/gradientai/types/volume_create_params.py diff --git a/src/do_gradientai/types/volume_create_response.py b/src/gradientai/types/volume_create_response.py similarity index 100% rename from src/do_gradientai/types/volume_create_response.py rename to src/gradientai/types/volume_create_response.py diff --git a/src/do_gradientai/types/volume_delete_by_name_params.py 
b/src/gradientai/types/volume_delete_by_name_params.py similarity index 100% rename from src/do_gradientai/types/volume_delete_by_name_params.py rename to src/gradientai/types/volume_delete_by_name_params.py diff --git a/src/do_gradientai/types/volume_list_params.py b/src/gradientai/types/volume_list_params.py similarity index 100% rename from src/do_gradientai/types/volume_list_params.py rename to src/gradientai/types/volume_list_params.py diff --git a/src/do_gradientai/types/volume_list_response.py b/src/gradientai/types/volume_list_response.py similarity index 100% rename from src/do_gradientai/types/volume_list_response.py rename to src/gradientai/types/volume_list_response.py diff --git a/src/do_gradientai/types/volume_retrieve_response.py b/src/gradientai/types/volume_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/volume_retrieve_response.py rename to src/gradientai/types/volume_retrieve_response.py diff --git a/src/do_gradientai/types/volumes/__init__.py b/src/gradientai/types/volumes/__init__.py similarity index 100% rename from src/do_gradientai/types/volumes/__init__.py rename to src/gradientai/types/volumes/__init__.py diff --git a/src/do_gradientai/types/volumes/action_initiate_by_id_params.py b/src/gradientai/types/volumes/action_initiate_by_id_params.py similarity index 100% rename from src/do_gradientai/types/volumes/action_initiate_by_id_params.py rename to src/gradientai/types/volumes/action_initiate_by_id_params.py diff --git a/src/do_gradientai/types/volumes/action_initiate_by_id_response.py b/src/gradientai/types/volumes/action_initiate_by_id_response.py similarity index 100% rename from src/do_gradientai/types/volumes/action_initiate_by_id_response.py rename to src/gradientai/types/volumes/action_initiate_by_id_response.py diff --git a/src/do_gradientai/types/volumes/action_initiate_by_name_params.py b/src/gradientai/types/volumes/action_initiate_by_name_params.py similarity index 100% rename from 
src/do_gradientai/types/volumes/action_initiate_by_name_params.py rename to src/gradientai/types/volumes/action_initiate_by_name_params.py diff --git a/src/do_gradientai/types/volumes/action_initiate_by_name_response.py b/src/gradientai/types/volumes/action_initiate_by_name_response.py similarity index 100% rename from src/do_gradientai/types/volumes/action_initiate_by_name_response.py rename to src/gradientai/types/volumes/action_initiate_by_name_response.py diff --git a/src/do_gradientai/types/volumes/action_list_params.py b/src/gradientai/types/volumes/action_list_params.py similarity index 100% rename from src/do_gradientai/types/volumes/action_list_params.py rename to src/gradientai/types/volumes/action_list_params.py diff --git a/src/do_gradientai/types/volumes/action_list_response.py b/src/gradientai/types/volumes/action_list_response.py similarity index 100% rename from src/do_gradientai/types/volumes/action_list_response.py rename to src/gradientai/types/volumes/action_list_response.py diff --git a/src/do_gradientai/types/volumes/action_retrieve_params.py b/src/gradientai/types/volumes/action_retrieve_params.py similarity index 100% rename from src/do_gradientai/types/volumes/action_retrieve_params.py rename to src/gradientai/types/volumes/action_retrieve_params.py diff --git a/src/do_gradientai/types/volumes/action_retrieve_response.py b/src/gradientai/types/volumes/action_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/volumes/action_retrieve_response.py rename to src/gradientai/types/volumes/action_retrieve_response.py diff --git a/src/do_gradientai/types/volumes/snapshot_create_params.py b/src/gradientai/types/volumes/snapshot_create_params.py similarity index 100% rename from src/do_gradientai/types/volumes/snapshot_create_params.py rename to src/gradientai/types/volumes/snapshot_create_params.py diff --git a/src/do_gradientai/types/volumes/snapshot_create_response.py 
b/src/gradientai/types/volumes/snapshot_create_response.py similarity index 100% rename from src/do_gradientai/types/volumes/snapshot_create_response.py rename to src/gradientai/types/volumes/snapshot_create_response.py diff --git a/src/do_gradientai/types/volumes/snapshot_list_params.py b/src/gradientai/types/volumes/snapshot_list_params.py similarity index 100% rename from src/do_gradientai/types/volumes/snapshot_list_params.py rename to src/gradientai/types/volumes/snapshot_list_params.py diff --git a/src/do_gradientai/types/volumes/snapshot_list_response.py b/src/gradientai/types/volumes/snapshot_list_response.py similarity index 100% rename from src/do_gradientai/types/volumes/snapshot_list_response.py rename to src/gradientai/types/volumes/snapshot_list_response.py diff --git a/src/do_gradientai/types/volumes/snapshot_retrieve_response.py b/src/gradientai/types/volumes/snapshot_retrieve_response.py similarity index 100% rename from src/do_gradientai/types/volumes/snapshot_retrieve_response.py rename to src/gradientai/types/volumes/snapshot_retrieve_response.py diff --git a/src/do_gradientai/types/volumes/volume_action.py b/src/gradientai/types/volumes/volume_action.py similarity index 100% rename from src/do_gradientai/types/volumes/volume_action.py rename to src/gradientai/types/volumes/volume_action.py diff --git a/tests/api_resources/account/test_keys.py b/tests/api_resources/account/test_keys.py index 38318d6a..d123c774 100644 --- a/tests/api_resources/account/test_keys.py +++ b/tests/api_resources/account/test_keys.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.account import ( +from gradientai.types.account import ( KeyListResponse, KeyCreateResponse, KeyUpdateResponse, diff --git a/tests/api_resources/agents/chat/test_completions.py b/tests/api_resources/agents/chat/test_completions.py index 
2ec29fc3..6533a423 100644 --- a/tests/api_resources/agents/chat/test_completions.py +++ b/tests/api_resources/agents/chat/test_completions.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents.chat import CompletionCreateResponse +from gradientai.types.agents.chat import CompletionCreateResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/agents/evaluation_metrics/test_models.py b/tests/api_resources/agents/evaluation_metrics/test_models.py index 27ab4a27..6b8f8bc7 100644 --- a/tests/api_resources/agents/evaluation_metrics/test_models.py +++ b/tests/api_resources/agents/evaluation_metrics/test_models.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents.evaluation_metrics import ModelListResponse +from gradientai.types.agents.evaluation_metrics import ModelListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py index 2728393e..ea39c474 100644 --- a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py +++ b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents.evaluation_metrics import ( +from gradientai.types.agents.evaluation_metrics import ( WorkspaceListResponse, WorkspaceCreateResponse, WorkspaceDeleteResponse, diff --git 
a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py index 37d39018..635721b3 100644 --- a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py +++ b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents.evaluation_metrics.workspaces import ( +from gradientai.types.agents.evaluation_metrics.workspaces import ( AgentListResponse, AgentMoveResponse, ) diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py index 1e5275fe..c29511f5 100644 --- a/tests/api_resources/agents/test_api_keys.py +++ b/tests/api_resources/agents/test_api_keys.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( APIKeyListResponse, APIKeyCreateResponse, APIKeyDeleteResponse, diff --git a/tests/api_resources/agents/test_evaluation_datasets.py b/tests/api_resources/agents/test_evaluation_datasets.py index 56edd598..0413591e 100644 --- a/tests/api_resources/agents/test_evaluation_datasets.py +++ b/tests/api_resources/agents/test_evaluation_datasets.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( EvaluationDatasetCreateResponse, EvaluationDatasetCreateFileUploadPresignedURLsResponse, ) diff --git a/tests/api_resources/agents/test_evaluation_metrics.py 
b/tests/api_resources/agents/test_evaluation_metrics.py index 303d85d6..d64367ae 100644 --- a/tests/api_resources/agents/test_evaluation_metrics.py +++ b/tests/api_resources/agents/test_evaluation_metrics.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( EvaluationMetricListResponse, EvaluationMetricListRegionsResponse, ) diff --git a/tests/api_resources/agents/test_evaluation_runs.py b/tests/api_resources/agents/test_evaluation_runs.py index 9d443f16..2ea44e6b 100644 --- a/tests/api_resources/agents/test_evaluation_runs.py +++ b/tests/api_resources/agents/test_evaluation_runs.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( EvaluationRunCreateResponse, EvaluationRunRetrieveResponse, EvaluationRunListResultsResponse, diff --git a/tests/api_resources/agents/test_evaluation_test_cases.py b/tests/api_resources/agents/test_evaluation_test_cases.py index ae986abc..e9083ba3 100644 --- a/tests/api_resources/agents/test_evaluation_test_cases.py +++ b/tests/api_resources/agents/test_evaluation_test_cases.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( EvaluationTestCaseListResponse, EvaluationTestCaseCreateResponse, EvaluationTestCaseUpdateResponse, diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py index 624446e0..4390d1d2 100644 --- a/tests/api_resources/agents/test_functions.py +++ 
b/tests/api_resources/agents/test_functions.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( FunctionCreateResponse, FunctionDeleteResponse, FunctionUpdateResponse, diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py index 7ac99316..2ac20d89 100644 --- a/tests/api_resources/agents/test_knowledge_bases.py +++ b/tests/api_resources/agents/test_knowledge_bases.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse +from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/agents/test_routes.py b/tests/api_resources/agents/test_routes.py index 256a4757..d04e8c90 100644 --- a/tests/api_resources/agents/test_routes.py +++ b/tests/api_resources/agents/test_routes.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( RouteAddResponse, RouteViewResponse, RouteDeleteResponse, diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py index 158856ed..d6151470 100644 --- a/tests/api_resources/agents/test_versions.py +++ b/tests/api_resources/agents/test_versions.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from 
do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.agents import ( +from gradientai.types.agents import ( VersionListResponse, VersionUpdateResponse, ) diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 95b02106..46c8b431 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.chat import CompletionCreateResponse +from gradientai.types.chat import CompletionCreateResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/droplets/test_actions.py b/tests/api_resources/droplets/test_actions.py index e5696d6c..33ecb60c 100644 --- a/tests/api_resources/droplets/test_actions.py +++ b/tests/api_resources/droplets/test_actions.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.droplets import ( +from gradientai.types.droplets import ( ActionListResponse, ActionInitiateResponse, ActionRetrieveResponse, diff --git a/tests/api_resources/droplets/test_autoscale.py b/tests/api_resources/droplets/test_autoscale.py index 4f6ce219..c1865864 100644 --- a/tests/api_resources/droplets/test_autoscale.py +++ b/tests/api_resources/droplets/test_autoscale.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.droplets import ( +from gradientai.types.droplets import ( AutoscaleListResponse, AutoscaleCreateResponse, AutoscaleUpdateResponse, diff --git 
a/tests/api_resources/droplets/test_backups.py b/tests/api_resources/droplets/test_backups.py index abb95c19..f1c18a5f 100644 --- a/tests/api_resources/droplets/test_backups.py +++ b/tests/api_resources/droplets/test_backups.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.droplets import ( +from gradientai.types.droplets import ( BackupListResponse, BackupListPoliciesResponse, BackupRetrievePolicyResponse, diff --git a/tests/api_resources/droplets/test_destroy_with_associated_resources.py b/tests/api_resources/droplets/test_destroy_with_associated_resources.py index 9ad3c1dc..491de054 100644 --- a/tests/api_resources/droplets/test_destroy_with_associated_resources.py +++ b/tests/api_resources/droplets/test_destroy_with_associated_resources.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.droplets import ( +from gradientai.types.droplets import ( DestroyWithAssociatedResourceListResponse, DestroyWithAssociatedResourceCheckStatusResponse, ) diff --git a/tests/api_resources/firewalls/test_droplets.py b/tests/api_resources/firewalls/test_droplets.py index 3fb117f9..3df04735 100644 --- a/tests/api_resources/firewalls/test_droplets.py +++ b/tests/api_resources/firewalls/test_droplets.py @@ -7,7 +7,7 @@ import pytest -from do_gradientai import GradientAI, AsyncGradientAI +from gradientai import GradientAI, AsyncGradientAI base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/firewalls/test_rules.py b/tests/api_resources/firewalls/test_rules.py index 6f82e253..2f8a7de0 100644 --- a/tests/api_resources/firewalls/test_rules.py +++ b/tests/api_resources/firewalls/test_rules.py @@ -7,7 +7,7 @@ import pytest 
-from do_gradientai import GradientAI, AsyncGradientAI +from gradientai import GradientAI, AsyncGradientAI base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/firewalls/test_tags.py b/tests/api_resources/firewalls/test_tags.py index 04aa0975..68c8b107 100644 --- a/tests/api_resources/firewalls/test_tags.py +++ b/tests/api_resources/firewalls/test_tags.py @@ -7,7 +7,7 @@ import pytest -from do_gradientai import GradientAI, AsyncGradientAI +from gradientai import GradientAI, AsyncGradientAI base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/floating_ips/test_actions.py b/tests/api_resources/floating_ips/test_actions.py index e2e3c45e..a89a739c 100644 --- a/tests/api_resources/floating_ips/test_actions.py +++ b/tests/api_resources/floating_ips/test_actions.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.floating_ips import ( +from gradientai.types.floating_ips import ( ActionListResponse, ActionCreateResponse, ActionRetrieveResponse, diff --git a/tests/api_resources/images/test_actions.py b/tests/api_resources/images/test_actions.py index 93603a0a..8006bee1 100644 --- a/tests/api_resources/images/test_actions.py +++ b/tests/api_resources/images/test_actions.py @@ -7,10 +7,10 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.images import ActionListResponse -from do_gradientai.types.shared import Action +from gradientai.types.images import ActionListResponse +from gradientai.types.shared import Action base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/inference/test_api_keys.py 
b/tests/api_resources/inference/test_api_keys.py index 85ad49da..157a2e3d 100644 --- a/tests/api_resources/inference/test_api_keys.py +++ b/tests/api_resources/inference/test_api_keys.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.inference import ( +from gradientai.types.inference import ( APIKeyListResponse, APIKeyCreateResponse, APIKeyDeleteResponse, diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index ebb0841a..55b056b8 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.knowledge_bases import ( +from gradientai.types.knowledge_bases import ( DataSourceListResponse, DataSourceCreateResponse, DataSourceDeleteResponse, diff --git a/tests/api_resources/knowledge_bases/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py index b0185941..ed32d7f8 100644 --- a/tests/api_resources/knowledge_bases/test_indexing_jobs.py +++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.knowledge_bases import ( +from gradientai.types.knowledge_bases import ( IndexingJobListResponse, IndexingJobCreateResponse, IndexingJobRetrieveResponse, diff --git a/tests/api_resources/load_balancers/test_droplets.py b/tests/api_resources/load_balancers/test_droplets.py index cb439411..08ccf009 100644 --- 
a/tests/api_resources/load_balancers/test_droplets.py +++ b/tests/api_resources/load_balancers/test_droplets.py @@ -7,7 +7,7 @@ import pytest -from do_gradientai import GradientAI, AsyncGradientAI +from gradientai import GradientAI, AsyncGradientAI base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/load_balancers/test_forwarding_rules.py b/tests/api_resources/load_balancers/test_forwarding_rules.py index 2f09fa8a..3acf8287 100644 --- a/tests/api_resources/load_balancers/test_forwarding_rules.py +++ b/tests/api_resources/load_balancers/test_forwarding_rules.py @@ -7,7 +7,7 @@ import pytest -from do_gradientai import GradientAI, AsyncGradientAI +from gradientai import GradientAI, AsyncGradientAI base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/models/providers/test_anthropic.py b/tests/api_resources/models/providers/test_anthropic.py index 6b3d99a3..c61a97ea 100644 --- a/tests/api_resources/models/providers/test_anthropic.py +++ b/tests/api_resources/models/providers/test_anthropic.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.models.providers import ( +from gradientai.types.models.providers import ( AnthropicListResponse, AnthropicCreateResponse, AnthropicDeleteResponse, diff --git a/tests/api_resources/models/providers/test_openai.py b/tests/api_resources/models/providers/test_openai.py index bdde97ca..7fde1a69 100644 --- a/tests/api_resources/models/providers/test_openai.py +++ b/tests/api_resources/models/providers/test_openai.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.models.providers import ( +from 
gradientai.types.models.providers import ( OpenAIListResponse, OpenAICreateResponse, OpenAIDeleteResponse, diff --git a/tests/api_resources/test_account.py b/tests/api_resources/test_account.py index f2dd39bf..d6ee9b10 100644 --- a/tests/api_resources/test_account.py +++ b/tests/api_resources/test_account.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import AccountRetrieveResponse +from gradientai.types import AccountRetrieveResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index 2f68a06f..8a6a7d69 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import ( +from gradientai.types import ( AgentListResponse, AgentCreateResponse, AgentDeleteResponse, diff --git a/tests/api_resources/test_droplets.py b/tests/api_resources/test_droplets.py index e77cded1..e6d3b17d 100644 --- a/tests/api_resources/test_droplets.py +++ b/tests/api_resources/test_droplets.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import ( +from gradientai.types import ( DropletListResponse, DropletCreateResponse, DropletRetrieveResponse, diff --git a/tests/api_resources/test_firewalls.py b/tests/api_resources/test_firewalls.py index fb41eb37..f41d1b9f 100644 --- a/tests/api_resources/test_firewalls.py +++ b/tests/api_resources/test_firewalls.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI 
from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import ( +from gradientai.types import ( FirewallListResponse, FirewallCreateResponse, FirewallUpdateResponse, diff --git a/tests/api_resources/test_floating_ips.py b/tests/api_resources/test_floating_ips.py index 40904ab8..c9119fb9 100644 --- a/tests/api_resources/test_floating_ips.py +++ b/tests/api_resources/test_floating_ips.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import ( +from gradientai.types import ( FloatingIPListResponse, FloatingIPCreateResponse, FloatingIPRetrieveResponse, diff --git a/tests/api_resources/test_images.py b/tests/api_resources/test_images.py index 4ca6ee2d..1da2a301 100644 --- a/tests/api_resources/test_images.py +++ b/tests/api_resources/test_images.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import ( +from gradientai.types import ( ImageListResponse, ImageCreateResponse, ImageUpdateResponse, diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index c4d179cc..8a331b52 100644 --- a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import ( +from gradientai.types import ( KnowledgeBaseListResponse, KnowledgeBaseCreateResponse, KnowledgeBaseDeleteResponse, diff --git a/tests/api_resources/test_load_balancers.py b/tests/api_resources/test_load_balancers.py index 257636b8..6beb02fc 100644 --- 
a/tests/api_resources/test_load_balancers.py +++ b/tests/api_resources/test_load_balancers.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import ( +from gradientai.types import ( LoadBalancerListResponse, LoadBalancerCreateResponse, LoadBalancerUpdateResponse, diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index 803c5d5a..fe837973 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import ModelListResponse, ModelRetrieveResponse +from gradientai.types import ModelListResponse, ModelRetrieveResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py index f331342e..4f232293 100644 --- a/tests/api_resources/test_regions.py +++ b/tests/api_resources/test_regions.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import RegionListResponse +from gradientai.types import RegionListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_sizes.py b/tests/api_resources/test_sizes.py index 4e73485e..ea03f23b 100644 --- a/tests/api_resources/test_sizes.py +++ b/tests/api_resources/test_sizes.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import 
SizeListResponse +from gradientai.types import SizeListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_snapshots.py b/tests/api_resources/test_snapshots.py index 9910158a..5535fef1 100644 --- a/tests/api_resources/test_snapshots.py +++ b/tests/api_resources/test_snapshots.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import SnapshotListResponse, SnapshotRetrieveResponse +from gradientai.types import SnapshotListResponse, SnapshotRetrieveResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_volumes.py b/tests/api_resources/test_volumes.py index 38d9cd91..1848bdfb 100644 --- a/tests/api_resources/test_volumes.py +++ b/tests/api_resources/test_volumes.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types import ( +from gradientai.types import ( VolumeListResponse, VolumeCreateResponse, VolumeRetrieveResponse, diff --git a/tests/api_resources/volumes/test_actions.py b/tests/api_resources/volumes/test_actions.py index 5b9c2786..e13b3a58 100644 --- a/tests/api_resources/volumes/test_actions.py +++ b/tests/api_resources/volumes/test_actions.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.volumes import ( +from gradientai.types.volumes import ( ActionListResponse, ActionRetrieveResponse, ActionInitiateByIDResponse, diff --git a/tests/api_resources/volumes/test_snapshots.py b/tests/api_resources/volumes/test_snapshots.py index dccca462..21ef565b 100644 --- 
a/tests/api_resources/volumes/test_snapshots.py +++ b/tests/api_resources/volumes/test_snapshots.py @@ -7,9 +7,9 @@ import pytest +from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai.types.volumes import ( +from gradientai.types.volumes import ( SnapshotListResponse, SnapshotCreateResponse, SnapshotRetrieveResponse, diff --git a/tests/conftest.py b/tests/conftest.py index d61eb8b7..a5fb13ce 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -10,15 +10,15 @@ import pytest from pytest_asyncio import is_async_test -from do_gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient -from do_gradientai._utils import is_dict +from gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient +from gradientai._utils import is_dict if TYPE_CHECKING: from _pytest.fixtures import FixtureRequest # pyright: ignore[reportPrivateImportUsage] pytest.register_assert_rewrite("tests.utils") -logging.getLogger("do_gradientai").setLevel(logging.DEBUG) +logging.getLogger("gradientai").setLevel(logging.DEBUG) # automatically add `pytest.mark.asyncio()` to all of our async tests diff --git a/tests/test_client.py b/tests/test_client.py index e88c4544..a9d8d9b7 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -21,12 +21,12 @@ from respx import MockRouter from pydantic import ValidationError -from do_gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError -from do_gradientai._types import Omit -from do_gradientai._models import BaseModel, FinalRequestOptions -from do_gradientai._streaming import Stream, AsyncStream -from do_gradientai._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError -from do_gradientai._base_client import ( +from gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError +from gradientai._types import Omit +from gradientai._models import BaseModel, 
FinalRequestOptions +from gradientai._streaming import Stream, AsyncStream +from gradientai._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError +from gradientai._base_client import ( DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, @@ -259,10 +259,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic # to_raw_response_wrapper leaks through the @functools.wraps() decorator. # # removing the decorator fixes the leak for reasons we don't understand. - "do_gradientai/_legacy_response.py", - "do_gradientai/_response.py", + "gradientai/_legacy_response.py", + "gradientai/_response.py", # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. - "do_gradientai/_compat.py", + "gradientai/_compat.py", # Standard library leaks we don't care about. "/logging/__init__.py", ] @@ -873,7 +873,7 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) @@ -891,7 +891,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien assert _get_open_connections(self.client) == 0 - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def 
test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) @@ -909,7 +909,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.parametrize("failure_mode", ["status", "exception"]) def test_retries_taken( @@ -948,7 +948,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_omit_retry_count_header( self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter @@ -980,7 +980,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_overwrite_retry_count_header( self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter @@ -1261,10 +1261,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic # 
to_raw_response_wrapper leaks through the @functools.wraps() decorator. # # removing the decorator fixes the leak for reasons we don't understand. - "do_gradientai/_legacy_response.py", - "do_gradientai/_response.py", + "gradientai/_legacy_response.py", + "gradientai/_response.py", # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. - "do_gradientai/_compat.py", + "gradientai/_compat.py", # Standard library leaks we don't care about. "/logging/__init__.py", ] @@ -1880,7 +1880,7 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_timeout_errors_doesnt_leak( self, respx_mock: MockRouter, async_client: AsyncGradientAI @@ -1900,7 +1900,7 @@ async def test_retrying_timeout_errors_doesnt_leak( assert _get_open_connections(self.client) == 0 - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_status_errors_doesnt_leak( self, respx_mock: MockRouter, async_client: AsyncGradientAI @@ -1920,7 +1920,7 @@ async def test_retrying_status_errors_doesnt_leak( assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) 
@pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio @pytest.mark.parametrize("failure_mode", ["status", "exception"]) @@ -1960,7 +1960,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_omit_retry_count_header( @@ -1993,7 +1993,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_overwrite_retry_count_header( @@ -2036,8 +2036,8 @@ def test_get_platform(self) -> None: import nest_asyncio import threading - from do_gradientai._utils import asyncify - from do_gradientai._base_client import get_platform + from gradientai._utils import asyncify + from gradientai._base_client import get_platform async def test_main() -> None: result = await asyncify(get_platform)() diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py index 5a98ce1b..9d1579a8 100644 --- a/tests/test_deepcopy.py +++ b/tests/test_deepcopy.py @@ -1,4 +1,4 @@ -from do_gradientai._utils import deepcopy_minimal +from gradientai._utils import deepcopy_minimal def assert_different_identities(obj1: object, obj2: object) -> None: diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py index 341e65ae..2905d59c 100644 --- 
a/tests/test_extract_files.py +++ b/tests/test_extract_files.py @@ -4,8 +4,8 @@ import pytest -from do_gradientai._types import FileTypes -from do_gradientai._utils import extract_files +from gradientai._types import FileTypes +from gradientai._utils import extract_files def test_removes_files_from_input() -> None: diff --git a/tests/test_files.py b/tests/test_files.py index ff7914bb..4a723313 100644 --- a/tests/test_files.py +++ b/tests/test_files.py @@ -4,7 +4,7 @@ import pytest from dirty_equals import IsDict, IsList, IsBytes, IsTuple -from do_gradientai._files import to_httpx_files, async_to_httpx_files +from gradientai._files import to_httpx_files, async_to_httpx_files readme_path = Path(__file__).parent.parent.joinpath("README.md") diff --git a/tests/test_models.py b/tests/test_models.py index bfbef61a..3a857584 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -7,9 +7,9 @@ import pydantic from pydantic import Field -from do_gradientai._utils import PropertyInfo -from do_gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json -from do_gradientai._models import BaseModel, construct_type +from gradientai._utils import PropertyInfo +from gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json +from gradientai._models import BaseModel, construct_type class BasicModel(BaseModel): diff --git a/tests/test_qs.py b/tests/test_qs.py index c9213571..9080377b 100644 --- a/tests/test_qs.py +++ b/tests/test_qs.py @@ -4,7 +4,7 @@ import pytest -from do_gradientai._qs import Querystring, stringify +from gradientai._qs import Querystring, stringify def test_empty() -> None: diff --git a/tests/test_required_args.py b/tests/test_required_args.py index 434e9491..c4e6b9d8 100644 --- a/tests/test_required_args.py +++ b/tests/test_required_args.py @@ -2,7 +2,7 @@ import pytest -from do_gradientai._utils import required_args +from gradientai._utils import required_args def test_too_many_positional_params() -> None: diff --git 
a/tests/test_response.py b/tests/test_response.py index 001ce776..1a8f241e 100644 --- a/tests/test_response.py +++ b/tests/test_response.py @@ -6,8 +6,8 @@ import pytest import pydantic -from do_gradientai import BaseModel, GradientAI, AsyncGradientAI -from do_gradientai._response import ( +from gradientai import BaseModel, GradientAI, AsyncGradientAI +from gradientai._response import ( APIResponse, BaseAPIResponse, AsyncAPIResponse, @@ -15,8 +15,8 @@ AsyncBinaryAPIResponse, extract_response_type, ) -from do_gradientai._streaming import Stream -from do_gradientai._base_client import FinalRequestOptions +from gradientai._streaming import Stream +from gradientai._base_client import FinalRequestOptions class ConcreteBaseAPIResponse(APIResponse[bytes]): ... @@ -37,7 +37,7 @@ def test_extract_response_type_direct_classes() -> None: def test_extract_response_type_direct_class_missing_type_arg() -> None: with pytest.raises( RuntimeError, - match="Expected type to have a type argument at index 0 but it did not", + match="Expected type to have a type argument at index 0 but it did not", ): extract_response_type(AsyncAPIResponse) @@ -68,7 +68,7 @@ def test_response_parse_mismatched_basemodel(client: GradientAI) -> None: with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`", ): response.parse(to=PydanticModel) @@ -86,7 +86,7 @@ async def test_async_response_parse_mismatched_basemodel(async_client: AsyncGrad with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. 
`from gradientai import BaseModel`", ): await response.parse(to=PydanticModel) diff --git a/tests/test_streaming.py b/tests/test_streaming.py index c1ce8e85..cdb41a77 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -5,8 +5,8 @@ import httpx import pytest -from do_gradientai import GradientAI, AsyncGradientAI -from do_gradientai._streaming import Stream, AsyncStream, ServerSentEvent +from gradientai import GradientAI, AsyncGradientAI +from gradientai._streaming import Stream, AsyncStream, ServerSentEvent @pytest.mark.asyncio diff --git a/tests/test_transform.py b/tests/test_transform.py index 30c06d6a..825fe048 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -8,15 +8,15 @@ import pytest -from do_gradientai._types import NOT_GIVEN, Base64FileInput -from do_gradientai._utils import ( +from gradientai._types import NOT_GIVEN, Base64FileInput +from gradientai._utils import ( PropertyInfo, transform as _transform, parse_datetime, async_transform as _async_transform, ) -from do_gradientai._compat import PYDANTIC_V2 -from do_gradientai._models import BaseModel +from gradientai._compat import PYDANTIC_V2 +from gradientai._models import BaseModel _T = TypeVar("_T") diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py index 9ce2e0d3..3856b2c9 100644 --- a/tests/test_utils/test_proxy.py +++ b/tests/test_utils/test_proxy.py @@ -2,7 +2,7 @@ from typing import Any from typing_extensions import override -from do_gradientai._utils import LazyProxy +from gradientai._utils import LazyProxy class RecursiveLazyProxy(LazyProxy[Any]): diff --git a/tests/test_utils/test_typing.py b/tests/test_utils/test_typing.py index c9129fdc..66ad064f 100644 --- a/tests/test_utils/test_typing.py +++ b/tests/test_utils/test_typing.py @@ -2,7 +2,7 @@ from typing import Generic, TypeVar, cast -from do_gradientai._utils import extract_type_var_from_base +from gradientai._utils import extract_type_var_from_base _T = TypeVar("_T") _T2 = 
TypeVar("_T2") diff --git a/tests/utils.py b/tests/utils.py index 9def7c60..b539ed2c 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -8,8 +8,8 @@ from datetime import date, datetime from typing_extensions import Literal, get_args, get_origin, assert_type -from do_gradientai._types import Omit, NoneType -from do_gradientai._utils import ( +from gradientai._types import Omit, NoneType +from gradientai._utils import ( is_dict, is_list, is_list_type, @@ -18,8 +18,8 @@ is_annotated_type, is_type_alias_type, ) -from do_gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields -from do_gradientai._models import BaseModel +from gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields +from gradientai._models import BaseModel BaseModelT = TypeVar("BaseModelT", bound=BaseModel) From c4b2e95ca626c201d4431dc6e73f3d375156d64d Mon Sep 17 00:00:00 2001 From: meorphis Date: Thu, 17 Jul 2025 19:11:34 -0400 Subject: [PATCH 02/19] Revert "feat(api): update via SDK Studio" This reverts commit 5475a9460676d1c48e99e0d1e75e50de7caecf3a. 
--- .stats.yml | 8 +- api.md | 463 +--- src/gradientai/resources/__init__.py | 126 - src/gradientai/resources/account/__init__.py | 33 - src/gradientai/resources/account/account.py | 173 -- src/gradientai/resources/account/keys.py | 588 ----- src/gradientai/resources/agents/agents.py | 118 +- src/gradientai/resources/agents/api_keys.py | 28 +- .../resources/agents/chat/completions.py | 98 - .../agents/evaluation_metrics/__init__.py | 14 - .../evaluation_metrics/evaluation_metrics.py | 143 -- .../agents/evaluation_metrics/models.py | 254 -- .../evaluation_metrics/workspaces/agents.py | 24 +- .../workspaces/workspaces.py | 20 - .../resources/agents/evaluation_runs.py | 42 +- .../resources/agents/evaluation_test_cases.py | 12 +- src/gradientai/resources/agents/functions.py | 60 - src/gradientai/resources/agents/routes.py | 24 - src/gradientai/resources/agents/versions.py | 16 +- src/gradientai/resources/chat/completions.py | 98 - src/gradientai/resources/droplets/__init__.py | 75 - src/gradientai/resources/droplets/actions.py | 2048 --------------- .../resources/droplets/autoscale.py | 967 -------- src/gradientai/resources/droplets/backups.py | 460 ---- .../destroy_with_associated_resources.py | 622 ----- src/gradientai/resources/droplets/droplets.py | 1748 ------------- .../resources/firewalls/__init__.py | 61 - .../resources/firewalls/droplets.py | 296 --- .../resources/firewalls/firewalls.py | 647 ----- src/gradientai/resources/firewalls/rules.py | 320 --- src/gradientai/resources/firewalls/tags.py | 308 --- .../resources/floating_ips/__init__.py | 33 - .../resources/floating_ips/actions.py | 489 ---- .../resources/floating_ips/floating_ips.py | 635 ----- src/gradientai/resources/images/__init__.py | 33 - src/gradientai/resources/images/actions.py | 560 ----- src/gradientai/resources/images/images.py | 867 ------- .../resources/inference/api_keys.py | 20 +- .../resources/knowledge_bases/data_sources.py | 24 +- .../knowledge_bases/indexing_jobs.py | 18 +- 
.../knowledge_bases/knowledge_bases.py | 28 +- .../resources/load_balancers/__init__.py | 47 - .../resources/load_balancers/droplets.py | 302 --- .../load_balancers/forwarding_rules.py | 301 --- .../load_balancers/load_balancers.py | 2205 ----------------- src/gradientai/resources/models/models.py | 182 +- .../resources/models/providers/anthropic.py | 36 +- .../resources/models/providers/openai.py | 36 +- src/gradientai/resources/regions.py | 42 +- src/gradientai/resources/sizes.py | 199 -- src/gradientai/resources/snapshots.py | 425 ---- src/gradientai/resources/volumes/__init__.py | 47 - src/gradientai/resources/volumes/actions.py | 1554 ------------ src/gradientai/resources/volumes/snapshots.py | 499 ---- src/gradientai/resources/volumes/volumes.py | 1144 --------- src/gradientai/types/__init__.py | 97 +- src/gradientai/types/account/__init__.py | 11 - .../types/account/key_create_params.py | 22 - .../types/account/key_create_response.py | 39 - .../types/account/key_list_params.py | 15 - .../types/account/key_list_response.py | 46 - .../types/account/key_retrieve_response.py | 39 - .../types/account/key_update_params.py | 15 - .../types/account/key_update_response.py | 39 - .../types/account_retrieve_response.py | 55 - src/gradientai/types/agent_create_params.py | 8 - src/gradientai/types/agent_create_response.py | 1 - src/gradientai/types/agent_delete_response.py | 1 - src/gradientai/types/agent_list_params.py | 6 +- src/gradientai/types/agent_list_response.py | 75 - .../types/agent_retrieve_response.py | 1 - src/gradientai/types/agent_update_params.py | 18 - src/gradientai/types/agent_update_response.py | 1 - .../types/agent_update_status_params.py | 11 - .../types/agent_update_status_response.py | 1 - src/gradientai/types/agents/__init__.py | 7 - .../types/agents/api_evaluation_metric.py | 16 +- .../agents/api_evaluation_metric_result.py | 17 - .../types/agents/api_evaluation_prompt.py | 7 - .../types/agents/api_evaluation_run.py | 18 - 
.../types/agents/api_evaluation_test_case.py | 24 +- .../types/agents/api_key_create_params.py | 2 - .../types/agents/api_key_create_response.py | 1 - .../types/agents/api_key_delete_response.py | 1 - .../types/agents/api_key_list_params.py | 4 +- .../types/agents/api_key_list_response.py | 3 - .../agents/api_key_regenerate_response.py | 1 - .../types/agents/api_key_update_params.py | 3 - .../types/agents/api_key_update_response.py | 1 - .../agents/api_link_knowledge_base_output.py | 1 - .../types/agents/api_star_metric.py | 6 - .../types/agents/api_star_metric_param.py | 6 - .../agents/chat/completion_create_params.py | 113 - .../agents/chat/completion_create_response.py | 54 +- ...reate_file_upload_presigned_urls_params.py | 1 - .../evaluation_metric_list_regions_params.py | 15 - ...evaluation_metric_list_regions_response.py | 29 - .../agents/evaluation_metrics/__init__.py | 2 - .../evaluation_metrics/model_list_response.py | 21 - .../workspace_create_params.py | 3 - .../workspace_delete_response.py | 1 - .../workspace_list_response.py | 1 - .../workspace_update_params.py | 2 - .../workspaces/agent_list_params.py | 14 +- .../workspaces/agent_list_response.py | 2 - .../workspaces/agent_move_params.py | 2 - .../agents/evaluation_run_create_params.py | 1 - .../evaluation_run_list_results_params.py | 15 - .../evaluation_run_list_results_response.py | 8 - .../evaluation_test_case_list_response.py | 4 - .../evaluation_test_case_update_params.py | 1 - .../types/agents/function_create_params.py | 7 - .../types/agents/function_create_response.py | 1 - .../types/agents/function_delete_response.py | 1 - .../types/agents/function_update_params.py | 8 - .../types/agents/function_update_response.py | 1 - .../agents/knowledge_base_detach_response.py | 1 - .../types/agents/route_add_params.py | 2 - .../types/agents/route_add_response.py | 1 - .../types/agents/route_delete_response.py | 2 - .../types/agents/route_update_params.py | 4 - .../types/agents/route_update_response.py | 
2 - .../types/agents/route_view_response.py | 1 - .../types/agents/version_list_params.py | 4 +- .../types/agents/version_list_response.py | 51 +- .../types/agents/version_update_params.py | 2 - .../types/agents/version_update_response.py | 1 - src/gradientai/types/api_agent.py | 104 - .../types/api_agent_api_key_info.py | 5 - src/gradientai/types/api_agent_model.py | 14 - .../types/api_anthropic_api_key_info.py | 6 - src/gradientai/types/api_knowledge_base.py | 10 - src/gradientai/types/api_model.py | 10 - src/gradientai/types/api_model_version.py | 3 - .../types/api_openai_api_key_info.py | 7 - src/gradientai/types/api_workspace.py | 10 - .../types/chat/completion_create_params.py | 113 - .../types/chat/completion_create_response.py | 54 +- src/gradientai/types/domains.py | 22 - src/gradientai/types/domains_param.py | 22 - src/gradientai/types/droplet_backup_policy.py | 28 - .../types/droplet_backup_policy_param.py | 21 - src/gradientai/types/droplet_create_params.py | 213 -- .../types/droplet_create_response.py | 39 - .../types/droplet_delete_by_tag_params.py | 12 - .../types/droplet_list_firewalls_params.py | 15 - .../types/droplet_list_firewalls_response.py | 19 - .../types/droplet_list_kernels_params.py | 15 - .../types/droplet_list_kernels_response.py | 19 - .../types/droplet_list_neighbors_response.py | 12 - src/gradientai/types/droplet_list_params.py | 34 - src/gradientai/types/droplet_list_response.py | 19 - .../types/droplet_list_snapshots_params.py | 15 - .../types/droplet_list_snapshots_response.py | 53 - .../types/droplet_retrieve_response.py | 12 - src/gradientai/types/droplets/__init__.py | 51 - .../droplets/action_bulk_initiate_params.py | 72 - .../droplets/action_bulk_initiate_response.py | 12 - .../types/droplets/action_initiate_params.py | 278 --- .../droplets/action_initiate_response.py | 12 - .../types/droplets/action_list_params.py | 15 - .../types/droplets/action_list_response.py | 19 - .../droplets/action_retrieve_response.py | 12 - 
.../types/droplets/associated_resource.py | 21 - .../types/droplets/autoscale_create_params.py | 28 - .../droplets/autoscale_create_response.py | 12 - .../droplets/autoscale_list_history_params.py | 15 - .../autoscale_list_history_response.py | 48 - .../droplets/autoscale_list_members_params.py | 15 - .../autoscale_list_members_response.py | 47 - .../types/droplets/autoscale_list_params.py | 18 - .../types/droplets/autoscale_list_response.py | 19 - .../types/droplets/autoscale_pool.py | 54 - .../autoscale_pool_droplet_template.py | 69 - .../autoscale_pool_droplet_template_param.py | 84 - .../droplets/autoscale_pool_dynamic_config.py | 27 - .../autoscale_pool_dynamic_config_param.py | 27 - .../droplets/autoscale_pool_static_config.py | 10 - .../autoscale_pool_static_config_param.py | 12 - .../droplets/autoscale_retrieve_response.py | 12 - .../types/droplets/autoscale_update_params.py | 28 - .../droplets/autoscale_update_response.py | 12 - .../types/droplets/backup_list_params.py | 15 - .../droplets/backup_list_policies_params.py | 15 - .../droplets/backup_list_policies_response.py | 41 - .../types/droplets/backup_list_response.py | 53 - ...backup_list_supported_policies_response.py | 28 - .../backup_retrieve_policy_response.py | 30 - .../types/droplets/current_utilization.py | 15 - ...sociated_resource_check_status_response.py | 41 - ...ciated_resource_delete_selective_params.py | 34 - ..._with_associated_resource_list_response.py | 37 - .../droplets/destroyed_associated_resource.py | 28 - src/gradientai/types/firewall.py | 98 - .../types/firewall_create_params.py | 17 - .../types/firewall_create_response.py | 12 - src/gradientai/types/firewall_list_params.py | 15 - .../types/firewall_list_response.py | 19 - src/gradientai/types/firewall_param.py | 67 - .../types/firewall_retrieve_response.py | 12 - .../types/firewall_update_params.py | 13 - .../types/firewall_update_response.py | 12 - src/gradientai/types/firewalls/__init__.py | 10 - 
.../types/firewalls/droplet_add_params.py | 13 - .../types/firewalls/droplet_remove_params.py | 13 - .../types/firewalls/rule_add_params.py | 46 - .../types/firewalls/rule_remove_params.py | 46 - .../types/firewalls/tag_add_params.py | 18 - .../types/firewalls/tag_remove_params.py | 18 - src/gradientai/types/floating_ip.py | 47 - .../types/floating_ip_create_params.py | 24 - .../types/floating_ip_create_response.py | 21 - .../types/floating_ip_list_params.py | 15 - .../types/floating_ip_list_response.py | 19 - .../types/floating_ip_retrieve_response.py | 12 - src/gradientai/types/floating_ips/__init__.py | 8 - .../floating_ips/action_create_params.py | 24 - .../floating_ips/action_create_response.py | 17 - .../floating_ips/action_list_response.py | 19 - .../floating_ips/action_retrieve_response.py | 17 - src/gradientai/types/forwarding_rule.py | 49 - src/gradientai/types/forwarding_rule_param.py | 48 - src/gradientai/types/glb_settings.py | 45 - src/gradientai/types/glb_settings_param.py | 45 - src/gradientai/types/health_check.py | 49 - src/gradientai/types/health_check_param.py | 48 - src/gradientai/types/image_create_params.py | 81 - src/gradientai/types/image_create_response.py | 12 - src/gradientai/types/image_list_params.py | 27 - src/gradientai/types/image_list_response.py | 19 - .../types/image_retrieve_response.py | 10 - src/gradientai/types/image_update_params.py | 42 - src/gradientai/types/image_update_response.py | 10 - src/gradientai/types/images/__init__.py | 6 - .../types/images/action_create_params.py | 45 - .../types/images/action_list_response.py | 19 - .../types/inference/api_key_create_params.py | 1 - .../inference/api_key_create_response.py | 1 - .../inference/api_key_delete_response.py | 1 - .../types/inference/api_key_list_params.py | 4 +- .../types/inference/api_key_list_response.py | 3 - .../types/inference/api_key_update_params.py | 2 - .../api_key_update_regenerate_response.py | 1 - .../inference/api_key_update_response.py | 1 - 
.../types/inference/api_model_api_key_info.py | 5 - .../types/knowledge_base_create_params.py | 6 - .../types/knowledge_base_create_response.py | 1 - .../types/knowledge_base_delete_response.py | 1 - .../types/knowledge_base_list_params.py | 4 +- .../types/knowledge_base_list_response.py | 3 - .../types/knowledge_base_retrieve_response.py | 1 - .../types/knowledge_base_update_params.py | 5 +- .../types/knowledge_base_update_response.py | 1 - .../api_file_upload_data_source.py | 3 - .../api_file_upload_data_source_param.py | 3 - .../api_indexed_data_source.py | 13 - .../types/knowledge_bases/api_indexing_job.py | 7 - .../api_knowledge_base_data_source.py | 12 - .../knowledge_bases/api_spaces_data_source.py | 2 - .../api_spaces_data_source_param.py | 2 - .../knowledge_bases/aws_data_source_param.py | 4 - .../data_source_create_params.py | 4 - .../data_source_create_response.py | 1 - .../data_source_delete_response.py | 2 - .../data_source_list_params.py | 4 +- .../data_source_list_response.py | 3 - .../indexing_job_create_params.py | 5 - .../indexing_job_create_response.py | 1 - .../indexing_job_list_params.py | 4 +- .../indexing_job_list_response.py | 3 - .../indexing_job_retrieve_response.py | 1 - .../indexing_job_update_cancel_response.py | 1 - src/gradientai/types/lb_firewall.py | 21 - src/gradientai/types/lb_firewall_param.py | 22 - src/gradientai/types/load_balancer.py | 185 -- .../types/load_balancer_create_params.py | 335 --- .../types/load_balancer_create_response.py | 12 - .../types/load_balancer_list_params.py | 15 - .../types/load_balancer_list_response.py | 19 - .../types/load_balancer_retrieve_response.py | 12 - .../types/load_balancer_update_params.py | 335 --- .../types/load_balancer_update_response.py | 12 - .../types/load_balancers/__init__.py | 8 - .../load_balancers/droplet_add_params.py | 13 - .../load_balancers/droplet_remove_params.py | 13 - .../forwarding_rule_add_params.py | 14 - .../forwarding_rule_remove_params.py | 14 - 
.../model_list_params.py | 8 +- src/gradientai/types/model_list_response.py | 28 +- .../types/model_retrieve_response.py | 21 - .../providers/anthropic_create_params.py | 2 - .../providers/anthropic_create_response.py | 1 - .../providers/anthropic_delete_response.py | 1 - .../providers/anthropic_list_agents_params.py | 4 +- .../anthropic_list_agents_response.py | 2 - .../models/providers/anthropic_list_params.py | 4 +- .../providers/anthropic_list_response.py | 3 - .../providers/anthropic_retrieve_response.py | 1 - .../providers/anthropic_update_params.py | 3 - .../providers/anthropic_update_response.py | 1 - .../models/providers/openai_create_params.py | 2 - .../providers/openai_create_response.py | 1 - .../providers/openai_delete_response.py | 1 - .../models/providers/openai_list_params.py | 4 +- .../models/providers/openai_list_response.py | 3 - .../openai_retrieve_agents_params.py | 4 +- .../openai_retrieve_agents_response.py | 2 - .../providers/openai_retrieve_response.py | 1 - .../models/providers/openai_update_params.py | 3 - .../providers/openai_update_response.py | 1 - src/gradientai/types/region_list_params.py | 8 +- src/gradientai/types/region_list_response.py | 22 +- src/gradientai/types/shared/__init__.py | 26 - src/gradientai/types/shared/action.py | 51 - src/gradientai/types/shared/action_link.py | 18 - src/gradientai/types/shared/api_links.py | 5 - src/gradientai/types/shared/api_meta.py | 3 - src/gradientai/types/shared/backward_links.py | 15 - .../types/shared/chat_completion_chunk.py | 55 +- .../types/shared/completion_usage.py | 16 - src/gradientai/types/shared/disk_info.py | 27 - src/gradientai/types/shared/droplet.py | 143 -- .../shared/droplet_next_backup_window.py | 22 - .../types/shared/firewall_rule_target.py | 41 - src/gradientai/types/shared/forward_links.py | 15 - .../types/shared/garbage_collection.py | 43 - src/gradientai/types/shared/gpu_info.py | 25 - src/gradientai/types/shared/image.py | 131 - src/gradientai/types/shared/kernel.py 
| 25 - .../types/shared/meta_properties.py | 12 - src/gradientai/types/shared/network_v4.py | 26 - src/gradientai/types/shared/network_v6.py | 25 - src/gradientai/types/shared/page_links.py | 16 - src/gradientai/types/shared/region.py | 36 - .../types/shared/repository_blob.py | 15 - .../types/shared/repository_manifest.py | 38 - src/gradientai/types/shared/repository_tag.py | 34 - src/gradientai/types/shared/size.py | 79 - src/gradientai/types/shared/snapshots.py | 47 - src/gradientai/types/shared/subscription.py | 19 - .../types/shared/subscription_tier_base.py | 44 - src/gradientai/types/shared/vpc_peering.py | 30 - .../types/shared_params/__init__.py | 3 - .../shared_params/firewall_rule_target.py | 42 - src/gradientai/types/size_list_params.py | 15 - src/gradientai/types/size_list_response.py | 19 - src/gradientai/types/snapshot_list_params.py | 18 - .../types/snapshot_list_response.py | 19 - .../types/snapshot_retrieve_response.py | 12 - src/gradientai/types/sticky_sessions.py | 30 - src/gradientai/types/sticky_sessions_param.py | 29 - src/gradientai/types/volume_create_params.py | 153 -- .../types/volume_create_response.py | 65 - .../types/volume_delete_by_name_params.py | 31 - src/gradientai/types/volume_list_params.py | 37 - src/gradientai/types/volume_list_response.py | 73 - .../types/volume_retrieve_response.py | 65 - src/gradientai/types/volumes/__init__.py | 18 - .../volumes/action_initiate_by_id_params.py | 133 - .../volumes/action_initiate_by_id_response.py | 12 - .../volumes/action_initiate_by_name_params.py | 97 - .../action_initiate_by_name_response.py | 12 - .../types/volumes/action_list_params.py | 15 - .../types/volumes/action_list_response.py | 19 - .../types/volumes/action_retrieve_params.py | 17 - .../types/volumes/action_retrieve_response.py | 12 - .../types/volumes/snapshot_create_params.py | 21 - .../types/volumes/snapshot_create_response.py | 12 - .../types/volumes/snapshot_list_params.py | 15 - 
.../types/volumes/snapshot_list_response.py | 19 - .../volumes/snapshot_retrieve_response.py | 12 - src/gradientai/types/volumes/volume_action.py | 18 - tests/api_resources/account/__init__.py | 1 - tests/api_resources/account/test_keys.py | 399 --- .../agents/chat/test_completions.py | 44 - .../agents/evaluation_metrics/test_models.py | 102 - .../evaluation_metrics/test_workspaces.py | 40 +- .../workspaces/test_agents.py | 42 +- tests/api_resources/agents/test_api_keys.py | 156 +- .../agents/test_evaluation_datasets.py | 20 +- .../agents/test_evaluation_metrics.py | 79 +- .../agents/test_evaluation_runs.py | 76 +- .../agents/test_evaluation_test_cases.py | 116 +- tests/api_resources/agents/test_functions.py | 132 +- .../agents/test_knowledge_bases.py | 64 +- tests/api_resources/agents/test_routes.py | 148 +- tests/api_resources/agents/test_versions.py | 40 +- tests/api_resources/chat/test_completions.py | 44 - tests/api_resources/droplets/__init__.py | 1 - tests/api_resources/droplets/test_actions.py | 1209 --------- .../api_resources/droplets/test_autoscale.py | 953 ------- tests/api_resources/droplets/test_backups.py | 315 --- .../test_destroy_with_associated_resources.py | 429 ---- tests/api_resources/firewalls/__init__.py | 1 - .../api_resources/firewalls/test_droplets.py | 206 -- tests/api_resources/firewalls/test_rules.py | 326 --- tests/api_resources/firewalls/test_tags.py | 206 -- tests/api_resources/floating_ips/__init__.py | 1 - .../floating_ips/test_actions.py | 396 --- tests/api_resources/images/__init__.py | 1 - tests/api_resources/images/test_actions.py | 321 --- .../api_resources/inference/test_api_keys.py | 28 +- .../knowledge_bases/test_data_sources.py | 104 +- .../knowledge_bases/test_indexing_jobs.py | 28 +- .../api_resources/load_balancers/__init__.py | 1 - .../load_balancers/test_droplets.py | 206 -- .../load_balancers/test_forwarding_rules.py | 318 --- .../models/providers/test_anthropic.py | 52 +- .../models/providers/test_openai.py | 52 +- 
tests/api_resources/test_account.py | 80 - tests/api_resources/test_agents.py | 130 +- tests/api_resources/test_droplets.py | 912 ------- tests/api_resources/test_firewalls.py | 617 ----- tests/api_resources/test_floating_ips.py | 424 ---- tests/api_resources/test_images.py | 417 ---- tests/api_resources/test_knowledge_bases.py | 128 +- tests/api_resources/test_load_balancers.py | 1443 ----------- tests/api_resources/test_models.py | 100 +- tests/api_resources/test_regions.py | 8 +- tests/api_resources/test_sizes.py | 98 - tests/api_resources/test_snapshots.py | 236 -- tests/api_resources/test_volumes.py | 568 ----- tests/api_resources/volumes/__init__.py | 1 - tests/api_resources/volumes/test_actions.py | 825 ------ tests/api_resources/volumes/test_snapshots.py | 412 --- 425 files changed, 1005 insertions(+), 39549 deletions(-) delete mode 100644 src/gradientai/resources/account/__init__.py delete mode 100644 src/gradientai/resources/account/account.py delete mode 100644 src/gradientai/resources/account/keys.py delete mode 100644 src/gradientai/resources/agents/evaluation_metrics/models.py delete mode 100644 src/gradientai/resources/droplets/__init__.py delete mode 100644 src/gradientai/resources/droplets/actions.py delete mode 100644 src/gradientai/resources/droplets/autoscale.py delete mode 100644 src/gradientai/resources/droplets/backups.py delete mode 100644 src/gradientai/resources/droplets/destroy_with_associated_resources.py delete mode 100644 src/gradientai/resources/droplets/droplets.py delete mode 100644 src/gradientai/resources/firewalls/__init__.py delete mode 100644 src/gradientai/resources/firewalls/droplets.py delete mode 100644 src/gradientai/resources/firewalls/firewalls.py delete mode 100644 src/gradientai/resources/firewalls/rules.py delete mode 100644 src/gradientai/resources/firewalls/tags.py delete mode 100644 src/gradientai/resources/floating_ips/__init__.py delete mode 100644 src/gradientai/resources/floating_ips/actions.py delete mode 
100644 src/gradientai/resources/floating_ips/floating_ips.py delete mode 100644 src/gradientai/resources/images/__init__.py delete mode 100644 src/gradientai/resources/images/actions.py delete mode 100644 src/gradientai/resources/images/images.py delete mode 100644 src/gradientai/resources/load_balancers/__init__.py delete mode 100644 src/gradientai/resources/load_balancers/droplets.py delete mode 100644 src/gradientai/resources/load_balancers/forwarding_rules.py delete mode 100644 src/gradientai/resources/load_balancers/load_balancers.py delete mode 100644 src/gradientai/resources/sizes.py delete mode 100644 src/gradientai/resources/snapshots.py delete mode 100644 src/gradientai/resources/volumes/__init__.py delete mode 100644 src/gradientai/resources/volumes/actions.py delete mode 100644 src/gradientai/resources/volumes/snapshots.py delete mode 100644 src/gradientai/resources/volumes/volumes.py delete mode 100644 src/gradientai/types/account/__init__.py delete mode 100644 src/gradientai/types/account/key_create_params.py delete mode 100644 src/gradientai/types/account/key_create_response.py delete mode 100644 src/gradientai/types/account/key_list_params.py delete mode 100644 src/gradientai/types/account/key_list_response.py delete mode 100644 src/gradientai/types/account/key_retrieve_response.py delete mode 100644 src/gradientai/types/account/key_update_params.py delete mode 100644 src/gradientai/types/account/key_update_response.py delete mode 100644 src/gradientai/types/account_retrieve_response.py delete mode 100644 src/gradientai/types/agents/evaluation_metric_list_regions_params.py delete mode 100644 src/gradientai/types/agents/evaluation_metric_list_regions_response.py delete mode 100644 src/gradientai/types/agents/evaluation_metrics/model_list_response.py delete mode 100644 src/gradientai/types/agents/evaluation_run_list_results_params.py delete mode 100644 src/gradientai/types/domains.py delete mode 100644 src/gradientai/types/domains_param.py delete mode 
100644 src/gradientai/types/droplet_backup_policy.py delete mode 100644 src/gradientai/types/droplet_backup_policy_param.py delete mode 100644 src/gradientai/types/droplet_create_params.py delete mode 100644 src/gradientai/types/droplet_create_response.py delete mode 100644 src/gradientai/types/droplet_delete_by_tag_params.py delete mode 100644 src/gradientai/types/droplet_list_firewalls_params.py delete mode 100644 src/gradientai/types/droplet_list_firewalls_response.py delete mode 100644 src/gradientai/types/droplet_list_kernels_params.py delete mode 100644 src/gradientai/types/droplet_list_kernels_response.py delete mode 100644 src/gradientai/types/droplet_list_neighbors_response.py delete mode 100644 src/gradientai/types/droplet_list_params.py delete mode 100644 src/gradientai/types/droplet_list_response.py delete mode 100644 src/gradientai/types/droplet_list_snapshots_params.py delete mode 100644 src/gradientai/types/droplet_list_snapshots_response.py delete mode 100644 src/gradientai/types/droplet_retrieve_response.py delete mode 100644 src/gradientai/types/droplets/__init__.py delete mode 100644 src/gradientai/types/droplets/action_bulk_initiate_params.py delete mode 100644 src/gradientai/types/droplets/action_bulk_initiate_response.py delete mode 100644 src/gradientai/types/droplets/action_initiate_params.py delete mode 100644 src/gradientai/types/droplets/action_initiate_response.py delete mode 100644 src/gradientai/types/droplets/action_list_params.py delete mode 100644 src/gradientai/types/droplets/action_list_response.py delete mode 100644 src/gradientai/types/droplets/action_retrieve_response.py delete mode 100644 src/gradientai/types/droplets/associated_resource.py delete mode 100644 src/gradientai/types/droplets/autoscale_create_params.py delete mode 100644 src/gradientai/types/droplets/autoscale_create_response.py delete mode 100644 src/gradientai/types/droplets/autoscale_list_history_params.py delete mode 100644 
src/gradientai/types/droplets/autoscale_list_history_response.py delete mode 100644 src/gradientai/types/droplets/autoscale_list_members_params.py delete mode 100644 src/gradientai/types/droplets/autoscale_list_members_response.py delete mode 100644 src/gradientai/types/droplets/autoscale_list_params.py delete mode 100644 src/gradientai/types/droplets/autoscale_list_response.py delete mode 100644 src/gradientai/types/droplets/autoscale_pool.py delete mode 100644 src/gradientai/types/droplets/autoscale_pool_droplet_template.py delete mode 100644 src/gradientai/types/droplets/autoscale_pool_droplet_template_param.py delete mode 100644 src/gradientai/types/droplets/autoscale_pool_dynamic_config.py delete mode 100644 src/gradientai/types/droplets/autoscale_pool_dynamic_config_param.py delete mode 100644 src/gradientai/types/droplets/autoscale_pool_static_config.py delete mode 100644 src/gradientai/types/droplets/autoscale_pool_static_config_param.py delete mode 100644 src/gradientai/types/droplets/autoscale_retrieve_response.py delete mode 100644 src/gradientai/types/droplets/autoscale_update_params.py delete mode 100644 src/gradientai/types/droplets/autoscale_update_response.py delete mode 100644 src/gradientai/types/droplets/backup_list_params.py delete mode 100644 src/gradientai/types/droplets/backup_list_policies_params.py delete mode 100644 src/gradientai/types/droplets/backup_list_policies_response.py delete mode 100644 src/gradientai/types/droplets/backup_list_response.py delete mode 100644 src/gradientai/types/droplets/backup_list_supported_policies_response.py delete mode 100644 src/gradientai/types/droplets/backup_retrieve_policy_response.py delete mode 100644 src/gradientai/types/droplets/current_utilization.py delete mode 100644 src/gradientai/types/droplets/destroy_with_associated_resource_check_status_response.py delete mode 100644 src/gradientai/types/droplets/destroy_with_associated_resource_delete_selective_params.py delete mode 100644 
src/gradientai/types/droplets/destroy_with_associated_resource_list_response.py delete mode 100644 src/gradientai/types/droplets/destroyed_associated_resource.py delete mode 100644 src/gradientai/types/firewall.py delete mode 100644 src/gradientai/types/firewall_create_params.py delete mode 100644 src/gradientai/types/firewall_create_response.py delete mode 100644 src/gradientai/types/firewall_list_params.py delete mode 100644 src/gradientai/types/firewall_list_response.py delete mode 100644 src/gradientai/types/firewall_param.py delete mode 100644 src/gradientai/types/firewall_retrieve_response.py delete mode 100644 src/gradientai/types/firewall_update_params.py delete mode 100644 src/gradientai/types/firewall_update_response.py delete mode 100644 src/gradientai/types/firewalls/__init__.py delete mode 100644 src/gradientai/types/firewalls/droplet_add_params.py delete mode 100644 src/gradientai/types/firewalls/droplet_remove_params.py delete mode 100644 src/gradientai/types/firewalls/rule_add_params.py delete mode 100644 src/gradientai/types/firewalls/rule_remove_params.py delete mode 100644 src/gradientai/types/firewalls/tag_add_params.py delete mode 100644 src/gradientai/types/firewalls/tag_remove_params.py delete mode 100644 src/gradientai/types/floating_ip.py delete mode 100644 src/gradientai/types/floating_ip_create_params.py delete mode 100644 src/gradientai/types/floating_ip_create_response.py delete mode 100644 src/gradientai/types/floating_ip_list_params.py delete mode 100644 src/gradientai/types/floating_ip_list_response.py delete mode 100644 src/gradientai/types/floating_ip_retrieve_response.py delete mode 100644 src/gradientai/types/floating_ips/__init__.py delete mode 100644 src/gradientai/types/floating_ips/action_create_params.py delete mode 100644 src/gradientai/types/floating_ips/action_create_response.py delete mode 100644 src/gradientai/types/floating_ips/action_list_response.py delete mode 100644 
src/gradientai/types/floating_ips/action_retrieve_response.py delete mode 100644 src/gradientai/types/forwarding_rule.py delete mode 100644 src/gradientai/types/forwarding_rule_param.py delete mode 100644 src/gradientai/types/glb_settings.py delete mode 100644 src/gradientai/types/glb_settings_param.py delete mode 100644 src/gradientai/types/health_check.py delete mode 100644 src/gradientai/types/health_check_param.py delete mode 100644 src/gradientai/types/image_create_params.py delete mode 100644 src/gradientai/types/image_create_response.py delete mode 100644 src/gradientai/types/image_list_params.py delete mode 100644 src/gradientai/types/image_list_response.py delete mode 100644 src/gradientai/types/image_retrieve_response.py delete mode 100644 src/gradientai/types/image_update_params.py delete mode 100644 src/gradientai/types/image_update_response.py delete mode 100644 src/gradientai/types/images/__init__.py delete mode 100644 src/gradientai/types/images/action_create_params.py delete mode 100644 src/gradientai/types/images/action_list_response.py delete mode 100644 src/gradientai/types/lb_firewall.py delete mode 100644 src/gradientai/types/lb_firewall_param.py delete mode 100644 src/gradientai/types/load_balancer.py delete mode 100644 src/gradientai/types/load_balancer_create_params.py delete mode 100644 src/gradientai/types/load_balancer_create_response.py delete mode 100644 src/gradientai/types/load_balancer_list_params.py delete mode 100644 src/gradientai/types/load_balancer_list_response.py delete mode 100644 src/gradientai/types/load_balancer_retrieve_response.py delete mode 100644 src/gradientai/types/load_balancer_update_params.py delete mode 100644 src/gradientai/types/load_balancer_update_response.py delete mode 100644 src/gradientai/types/load_balancers/__init__.py delete mode 100644 src/gradientai/types/load_balancers/droplet_add_params.py delete mode 100644 src/gradientai/types/load_balancers/droplet_remove_params.py delete mode 100644 
src/gradientai/types/load_balancers/forwarding_rule_add_params.py delete mode 100644 src/gradientai/types/load_balancers/forwarding_rule_remove_params.py rename src/gradientai/types/{agents/evaluation_metrics => }/model_list_params.py (87%) delete mode 100644 src/gradientai/types/model_retrieve_response.py delete mode 100644 src/gradientai/types/shared/action.py delete mode 100644 src/gradientai/types/shared/action_link.py delete mode 100644 src/gradientai/types/shared/backward_links.py delete mode 100644 src/gradientai/types/shared/completion_usage.py delete mode 100644 src/gradientai/types/shared/disk_info.py delete mode 100644 src/gradientai/types/shared/droplet.py delete mode 100644 src/gradientai/types/shared/droplet_next_backup_window.py delete mode 100644 src/gradientai/types/shared/firewall_rule_target.py delete mode 100644 src/gradientai/types/shared/forward_links.py delete mode 100644 src/gradientai/types/shared/garbage_collection.py delete mode 100644 src/gradientai/types/shared/gpu_info.py delete mode 100644 src/gradientai/types/shared/image.py delete mode 100644 src/gradientai/types/shared/kernel.py delete mode 100644 src/gradientai/types/shared/meta_properties.py delete mode 100644 src/gradientai/types/shared/network_v4.py delete mode 100644 src/gradientai/types/shared/network_v6.py delete mode 100644 src/gradientai/types/shared/page_links.py delete mode 100644 src/gradientai/types/shared/region.py delete mode 100644 src/gradientai/types/shared/repository_blob.py delete mode 100644 src/gradientai/types/shared/repository_manifest.py delete mode 100644 src/gradientai/types/shared/repository_tag.py delete mode 100644 src/gradientai/types/shared/size.py delete mode 100644 src/gradientai/types/shared/snapshots.py delete mode 100644 src/gradientai/types/shared/subscription.py delete mode 100644 src/gradientai/types/shared/subscription_tier_base.py delete mode 100644 src/gradientai/types/shared/vpc_peering.py delete mode 100644 
src/gradientai/types/shared_params/__init__.py delete mode 100644 src/gradientai/types/shared_params/firewall_rule_target.py delete mode 100644 src/gradientai/types/size_list_params.py delete mode 100644 src/gradientai/types/size_list_response.py delete mode 100644 src/gradientai/types/snapshot_list_params.py delete mode 100644 src/gradientai/types/snapshot_list_response.py delete mode 100644 src/gradientai/types/snapshot_retrieve_response.py delete mode 100644 src/gradientai/types/sticky_sessions.py delete mode 100644 src/gradientai/types/sticky_sessions_param.py delete mode 100644 src/gradientai/types/volume_create_params.py delete mode 100644 src/gradientai/types/volume_create_response.py delete mode 100644 src/gradientai/types/volume_delete_by_name_params.py delete mode 100644 src/gradientai/types/volume_list_params.py delete mode 100644 src/gradientai/types/volume_list_response.py delete mode 100644 src/gradientai/types/volume_retrieve_response.py delete mode 100644 src/gradientai/types/volumes/__init__.py delete mode 100644 src/gradientai/types/volumes/action_initiate_by_id_params.py delete mode 100644 src/gradientai/types/volumes/action_initiate_by_id_response.py delete mode 100644 src/gradientai/types/volumes/action_initiate_by_name_params.py delete mode 100644 src/gradientai/types/volumes/action_initiate_by_name_response.py delete mode 100644 src/gradientai/types/volumes/action_list_params.py delete mode 100644 src/gradientai/types/volumes/action_list_response.py delete mode 100644 src/gradientai/types/volumes/action_retrieve_params.py delete mode 100644 src/gradientai/types/volumes/action_retrieve_response.py delete mode 100644 src/gradientai/types/volumes/snapshot_create_params.py delete mode 100644 src/gradientai/types/volumes/snapshot_create_response.py delete mode 100644 src/gradientai/types/volumes/snapshot_list_params.py delete mode 100644 src/gradientai/types/volumes/snapshot_list_response.py delete mode 100644 
src/gradientai/types/volumes/snapshot_retrieve_response.py delete mode 100644 src/gradientai/types/volumes/volume_action.py delete mode 100644 tests/api_resources/account/__init__.py delete mode 100644 tests/api_resources/account/test_keys.py delete mode 100644 tests/api_resources/agents/evaluation_metrics/test_models.py delete mode 100644 tests/api_resources/droplets/__init__.py delete mode 100644 tests/api_resources/droplets/test_actions.py delete mode 100644 tests/api_resources/droplets/test_autoscale.py delete mode 100644 tests/api_resources/droplets/test_backups.py delete mode 100644 tests/api_resources/droplets/test_destroy_with_associated_resources.py delete mode 100644 tests/api_resources/firewalls/__init__.py delete mode 100644 tests/api_resources/firewalls/test_droplets.py delete mode 100644 tests/api_resources/firewalls/test_rules.py delete mode 100644 tests/api_resources/firewalls/test_tags.py delete mode 100644 tests/api_resources/floating_ips/__init__.py delete mode 100644 tests/api_resources/floating_ips/test_actions.py delete mode 100644 tests/api_resources/images/__init__.py delete mode 100644 tests/api_resources/images/test_actions.py delete mode 100644 tests/api_resources/load_balancers/__init__.py delete mode 100644 tests/api_resources/load_balancers/test_droplets.py delete mode 100644 tests/api_resources/load_balancers/test_forwarding_rules.py delete mode 100644 tests/api_resources/test_account.py delete mode 100644 tests/api_resources/test_droplets.py delete mode 100644 tests/api_resources/test_firewalls.py delete mode 100644 tests/api_resources/test_floating_ips.py delete mode 100644 tests/api_resources/test_images.py delete mode 100644 tests/api_resources/test_load_balancers.py delete mode 100644 tests/api_resources/test_sizes.py delete mode 100644 tests/api_resources/test_snapshots.py delete mode 100644 tests/api_resources/test_volumes.py delete mode 100644 tests/api_resources/volumes/__init__.py delete mode 100644 
tests/api_resources/volumes/test_actions.py delete mode 100644 tests/api_resources/volumes/test_snapshots.py diff --git a/.stats.yml b/.stats.yml index 4aec10aa..89f80bc1 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 169 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-f8e8c290636c1e218efcf7bfe92ba7570c11690754d21287d838919fbc943a80.yml -openapi_spec_hash: 1eddf488ecbe415efb45445697716f5d -config_hash: 39b04f6247d3dc8917c3adab078ec8c4 +configured_endpoints: 77 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-391afaae764eb758523b67805cb47ae3bc319dc119d83414afdd66f123ceaf5c.yml +openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208 +config_hash: 0bd094d86a010f7cbd5eb22ef548a29f diff --git a/api.md b/api.md index 231cef87..c6acd4ec 100644 --- a/api.md +++ b/api.md @@ -1,38 +1,7 @@ # Shared Types ```python -from gradientai.types import ( - Action, - ActionLink, - APILinks, - APIMeta, - BackwardLinks, - ChatCompletionChunk, - ChatCompletionTokenLogprob, - CompletionUsage, - DiskInfo, - Droplet, - DropletNextBackupWindow, - FirewallRuleTarget, - ForwardLinks, - GarbageCollection, - GPUInfo, - Image, - Kernel, - MetaProperties, - NetworkV4, - NetworkV6, - PageLinks, - Region, - RepositoryBlob, - RepositoryManifest, - RepositoryTag, - Size, - Snapshots, - Subscription, - SubscriptionTierBase, - VpcPeering, -) +from gradientai.types import APILinks, APIMeta, ChatCompletionChunk, ChatCompletionTokenLogprob ``` # Agents @@ -108,16 +77,12 @@ Methods: Types: ```python -from gradientai.types.agents import ( - EvaluationMetricListResponse, - EvaluationMetricListRegionsResponse, -) +from gradientai.types.agents import EvaluationMetricListResponse ``` Methods: - client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse -- client.agents.evaluation_metrics.list_regions(\*\*params) -> EvaluationMetricListRegionsResponse ### 
Workspaces @@ -159,18 +124,6 @@ Methods: - client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse - client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse -### Models - -Types: - -```python -from gradientai.types.agents.evaluation_metrics import ModelListResponse -``` - -Methods: - -- client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse - ## EvaluationRuns Types: @@ -192,7 +145,7 @@ Methods: - client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse - client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse -- client.agents.evaluation_runs.list_results(evaluation_run_uuid, \*\*params) -> EvaluationRunListResultsResponse +- client.agents.evaluation_runs.list_results(evaluation_run_uuid) -> EvaluationRunListResultsResponse - client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse ## EvaluationTestCases @@ -215,7 +168,7 @@ Methods: - client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse - client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse -- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse +- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse - client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse - client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse @@ -324,7 +277,7 @@ from gradientai.types import RegionListResponse Methods: -- client.regions.list(\*\*params) -> RegionListResponse +- client.regions.list(\*\*params) -> RegionListResponse # KnowledgeBases @@ -426,19 +379,12 @@ Methods: 
Types: ```python -from gradientai.types import ( - APIAgreement, - APIModel, - APIModelVersion, - ModelRetrieveResponse, - ModelListResponse, -) +from gradientai.types import APIAgreement, APIModel, APIModelVersion, ModelListResponse ``` Methods: -- client.models.retrieve(model) -> ModelRetrieveResponse -- client.models.list() -> ModelListResponse +- client.models.list(\*\*params) -> ModelListResponse ## Providers @@ -489,398 +435,3 @@ Methods: - client.models.providers.openai.list(\*\*params) -> OpenAIListResponse - client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse - client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse - -# Droplets - -Types: - -```python -from gradientai.types import ( - DropletBackupPolicy, - DropletCreateResponse, - DropletRetrieveResponse, - DropletListResponse, - DropletListFirewallsResponse, - DropletListKernelsResponse, - DropletListNeighborsResponse, - DropletListSnapshotsResponse, -) -``` - -Methods: - -- client.droplets.create(\*\*params) -> DropletCreateResponse -- client.droplets.retrieve(droplet_id) -> DropletRetrieveResponse -- client.droplets.list(\*\*params) -> DropletListResponse -- client.droplets.delete(droplet_id) -> None -- client.droplets.delete_by_tag(\*\*params) -> None -- client.droplets.list_firewalls(droplet_id, \*\*params) -> DropletListFirewallsResponse -- client.droplets.list_kernels(droplet_id, \*\*params) -> DropletListKernelsResponse -- client.droplets.list_neighbors(droplet_id) -> DropletListNeighborsResponse -- client.droplets.list_snapshots(droplet_id, \*\*params) -> DropletListSnapshotsResponse - -## Backups - -Types: - -```python -from gradientai.types.droplets import ( - BackupListResponse, - BackupListPoliciesResponse, - BackupListSupportedPoliciesResponse, - BackupRetrievePolicyResponse, -) -``` - -Methods: - -- client.droplets.backups.list(droplet_id, \*\*params) -> BackupListResponse -- client.droplets.backups.list_policies(\*\*params) 
-> BackupListPoliciesResponse -- client.droplets.backups.list_supported_policies() -> BackupListSupportedPoliciesResponse -- client.droplets.backups.retrieve_policy(droplet_id) -> BackupRetrievePolicyResponse - -## Actions - -Types: - -```python -from gradientai.types.droplets import ( - ActionRetrieveResponse, - ActionListResponse, - ActionBulkInitiateResponse, - ActionInitiateResponse, -) -``` - -Methods: - -- client.droplets.actions.retrieve(action_id, \*, droplet_id) -> ActionRetrieveResponse -- client.droplets.actions.list(droplet_id, \*\*params) -> ActionListResponse -- client.droplets.actions.bulk_initiate(\*\*params) -> ActionBulkInitiateResponse -- client.droplets.actions.initiate(droplet_id, \*\*params) -> ActionInitiateResponse - -## DestroyWithAssociatedResources - -Types: - -```python -from gradientai.types.droplets import ( - AssociatedResource, - DestroyedAssociatedResource, - DestroyWithAssociatedResourceListResponse, - DestroyWithAssociatedResourceCheckStatusResponse, -) -``` - -Methods: - -- client.droplets.destroy_with_associated_resources.list(droplet_id) -> DestroyWithAssociatedResourceListResponse -- client.droplets.destroy_with_associated_resources.check_status(droplet_id) -> DestroyWithAssociatedResourceCheckStatusResponse -- client.droplets.destroy_with_associated_resources.delete_dangerous(droplet_id) -> None -- client.droplets.destroy_with_associated_resources.delete_selective(droplet_id, \*\*params) -> None -- client.droplets.destroy_with_associated_resources.retry(droplet_id) -> None - -## Autoscale - -Types: - -```python -from gradientai.types.droplets import ( - AutoscalePool, - AutoscalePoolDropletTemplate, - AutoscalePoolDynamicConfig, - AutoscalePoolStaticConfig, - CurrentUtilization, - AutoscaleCreateResponse, - AutoscaleRetrieveResponse, - AutoscaleUpdateResponse, - AutoscaleListResponse, - AutoscaleListHistoryResponse, - AutoscaleListMembersResponse, -) -``` - -Methods: - -- client.droplets.autoscale.create(\*\*params) -> 
AutoscaleCreateResponse -- client.droplets.autoscale.retrieve(autoscale_pool_id) -> AutoscaleRetrieveResponse -- client.droplets.autoscale.update(autoscale_pool_id, \*\*params) -> AutoscaleUpdateResponse -- client.droplets.autoscale.list(\*\*params) -> AutoscaleListResponse -- client.droplets.autoscale.delete(autoscale_pool_id) -> None -- client.droplets.autoscale.delete_dangerous(autoscale_pool_id) -> None -- client.droplets.autoscale.list_history(autoscale_pool_id, \*\*params) -> AutoscaleListHistoryResponse -- client.droplets.autoscale.list_members(autoscale_pool_id, \*\*params) -> AutoscaleListMembersResponse - -# Firewalls - -Types: - -```python -from gradientai.types import ( - Firewall, - FirewallCreateResponse, - FirewallRetrieveResponse, - FirewallUpdateResponse, - FirewallListResponse, -) -``` - -Methods: - -- client.firewalls.create(\*\*params) -> FirewallCreateResponse -- client.firewalls.retrieve(firewall_id) -> FirewallRetrieveResponse -- client.firewalls.update(firewall_id, \*\*params) -> FirewallUpdateResponse -- client.firewalls.list(\*\*params) -> FirewallListResponse -- client.firewalls.delete(firewall_id) -> None - -## Droplets - -Methods: - -- client.firewalls.droplets.add(firewall_id, \*\*params) -> None -- client.firewalls.droplets.remove(firewall_id, \*\*params) -> None - -## Tags - -Methods: - -- client.firewalls.tags.add(firewall_id, \*\*params) -> None -- client.firewalls.tags.remove(firewall_id, \*\*params) -> None - -## Rules - -Methods: - -- client.firewalls.rules.add(firewall_id, \*\*params) -> None -- client.firewalls.rules.remove(firewall_id, \*\*params) -> None - -# FloatingIPs - -Types: - -```python -from gradientai.types import ( - FloatingIP, - FloatingIPCreateResponse, - FloatingIPRetrieveResponse, - FloatingIPListResponse, -) -``` - -Methods: - -- client.floating_ips.create(\*\*params) -> FloatingIPCreateResponse -- client.floating_ips.retrieve(floating_ip) -> FloatingIPRetrieveResponse -- client.floating_ips.list(\*\*params) 
-> FloatingIPListResponse -- client.floating_ips.delete(floating_ip) -> None - -## Actions - -Types: - -```python -from gradientai.types.floating_ips import ( - ActionCreateResponse, - ActionRetrieveResponse, - ActionListResponse, -) -``` - -Methods: - -- client.floating_ips.actions.create(floating_ip, \*\*params) -> ActionCreateResponse -- client.floating_ips.actions.retrieve(action_id, \*, floating_ip) -> ActionRetrieveResponse -- client.floating_ips.actions.list(floating_ip) -> ActionListResponse - -# Images - -Types: - -```python -from gradientai.types import ( - ImageCreateResponse, - ImageRetrieveResponse, - ImageUpdateResponse, - ImageListResponse, -) -``` - -Methods: - -- client.images.create(\*\*params) -> ImageCreateResponse -- client.images.retrieve(image_id) -> ImageRetrieveResponse -- client.images.update(image_id, \*\*params) -> ImageUpdateResponse -- client.images.list(\*\*params) -> ImageListResponse -- client.images.delete(image_id) -> None - -## Actions - -Types: - -```python -from gradientai.types.images import ActionListResponse -``` - -Methods: - -- client.images.actions.create(image_id, \*\*params) -> Action -- client.images.actions.retrieve(action_id, \*, image_id) -> Action -- client.images.actions.list(image_id) -> ActionListResponse - -# LoadBalancers - -Types: - -```python -from gradientai.types import ( - Domains, - ForwardingRule, - GlbSettings, - HealthCheck, - LbFirewall, - LoadBalancer, - StickySessions, - LoadBalancerCreateResponse, - LoadBalancerRetrieveResponse, - LoadBalancerUpdateResponse, - LoadBalancerListResponse, -) -``` - -Methods: - -- client.load_balancers.create(\*\*params) -> LoadBalancerCreateResponse -- client.load_balancers.retrieve(lb_id) -> LoadBalancerRetrieveResponse -- client.load_balancers.update(lb_id, \*\*params) -> LoadBalancerUpdateResponse -- client.load_balancers.list(\*\*params) -> LoadBalancerListResponse -- client.load_balancers.delete(lb_id) -> None -- client.load_balancers.delete_cache(lb_id) -> None 
- -## Droplets - -Methods: - -- client.load_balancers.droplets.add(lb_id, \*\*params) -> None -- client.load_balancers.droplets.remove(lb_id, \*\*params) -> None - -## ForwardingRules - -Methods: - -- client.load_balancers.forwarding_rules.add(lb_id, \*\*params) -> None -- client.load_balancers.forwarding_rules.remove(lb_id, \*\*params) -> None - -# Sizes - -Types: - -```python -from gradientai.types import SizeListResponse -``` - -Methods: - -- client.sizes.list(\*\*params) -> SizeListResponse - -# Snapshots - -Types: - -```python -from gradientai.types import SnapshotRetrieveResponse, SnapshotListResponse -``` - -Methods: - -- client.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse -- client.snapshots.list(\*\*params) -> SnapshotListResponse -- client.snapshots.delete(snapshot_id) -> None - -# Volumes - -Types: - -```python -from gradientai.types import VolumeCreateResponse, VolumeRetrieveResponse, VolumeListResponse -``` - -Methods: - -- client.volumes.create(\*\*params) -> VolumeCreateResponse -- client.volumes.retrieve(volume_id) -> VolumeRetrieveResponse -- client.volumes.list(\*\*params) -> VolumeListResponse -- client.volumes.delete(volume_id) -> None -- client.volumes.delete_by_name(\*\*params) -> None - -## Actions - -Types: - -```python -from gradientai.types.volumes import ( - VolumeAction, - ActionRetrieveResponse, - ActionListResponse, - ActionInitiateByIDResponse, - ActionInitiateByNameResponse, -) -``` - -Methods: - -- client.volumes.actions.retrieve(action_id, \*, volume_id, \*\*params) -> ActionRetrieveResponse -- client.volumes.actions.list(volume_id, \*\*params) -> ActionListResponse -- client.volumes.actions.initiate_by_id(volume_id, \*\*params) -> ActionInitiateByIDResponse -- client.volumes.actions.initiate_by_name(\*\*params) -> ActionInitiateByNameResponse - -## Snapshots - -Types: - -```python -from gradientai.types.volumes import ( - SnapshotCreateResponse, - SnapshotRetrieveResponse, - SnapshotListResponse, -) -``` - -Methods: 
- -- client.volumes.snapshots.create(volume_id, \*\*params) -> SnapshotCreateResponse -- client.volumes.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse -- client.volumes.snapshots.list(volume_id, \*\*params) -> SnapshotListResponse -- client.volumes.snapshots.delete(snapshot_id) -> None - -# Account - -Types: - -```python -from gradientai.types import AccountRetrieveResponse -``` - -Methods: - -- client.account.retrieve() -> AccountRetrieveResponse - -## Keys - -Types: - -```python -from gradientai.types.account import ( - KeyCreateResponse, - KeyRetrieveResponse, - KeyUpdateResponse, - KeyListResponse, -) -``` - -Methods: - -- client.account.keys.create(\*\*params) -> KeyCreateResponse -- client.account.keys.retrieve(ssh_key_identifier) -> KeyRetrieveResponse -- client.account.keys.update(ssh_key_identifier, \*\*params) -> KeyUpdateResponse -- client.account.keys.list(\*\*params) -> KeyListResponse -- client.account.keys.delete(ssh_key_identifier) -> None diff --git a/src/gradientai/resources/__init__.py b/src/gradientai/resources/__init__.py index 29fcc7e9..e1ed4a00 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -8,14 +8,6 @@ ChatResourceWithStreamingResponse, AsyncChatResourceWithStreamingResponse, ) -from .sizes import ( - SizesResource, - AsyncSizesResource, - SizesResourceWithRawResponse, - AsyncSizesResourceWithRawResponse, - SizesResourceWithStreamingResponse, - AsyncSizesResourceWithStreamingResponse, -) from .agents import ( AgentsResource, AsyncAgentsResource, @@ -24,14 +16,6 @@ AgentsResourceWithStreamingResponse, AsyncAgentsResourceWithStreamingResponse, ) -from .images import ( - ImagesResource, - AsyncImagesResource, - ImagesResourceWithRawResponse, - AsyncImagesResourceWithRawResponse, - ImagesResourceWithStreamingResponse, - AsyncImagesResourceWithStreamingResponse, -) from .models import ( ModelsResource, AsyncModelsResource, @@ -40,14 +24,6 @@ ModelsResourceWithStreamingResponse, 
AsyncModelsResourceWithStreamingResponse, ) -from .account import ( - AccountResource, - AsyncAccountResource, - AccountResourceWithRawResponse, - AsyncAccountResourceWithRawResponse, - AccountResourceWithStreamingResponse, - AsyncAccountResourceWithStreamingResponse, -) from .regions import ( RegionsResource, AsyncRegionsResource, @@ -56,30 +32,6 @@ RegionsResourceWithStreamingResponse, AsyncRegionsResourceWithStreamingResponse, ) -from .volumes import ( - VolumesResource, - AsyncVolumesResource, - VolumesResourceWithRawResponse, - AsyncVolumesResourceWithRawResponse, - VolumesResourceWithStreamingResponse, - AsyncVolumesResourceWithStreamingResponse, -) -from .droplets import ( - DropletsResource, - AsyncDropletsResource, - DropletsResourceWithRawResponse, - AsyncDropletsResourceWithRawResponse, - DropletsResourceWithStreamingResponse, - AsyncDropletsResourceWithStreamingResponse, -) -from .firewalls import ( - FirewallsResource, - AsyncFirewallsResource, - FirewallsResourceWithRawResponse, - AsyncFirewallsResourceWithRawResponse, - FirewallsResourceWithStreamingResponse, - AsyncFirewallsResourceWithStreamingResponse, -) from .inference import ( InferenceResource, AsyncInferenceResource, @@ -88,30 +40,6 @@ InferenceResourceWithStreamingResponse, AsyncInferenceResourceWithStreamingResponse, ) -from .snapshots import ( - SnapshotsResource, - AsyncSnapshotsResource, - SnapshotsResourceWithRawResponse, - AsyncSnapshotsResourceWithRawResponse, - SnapshotsResourceWithStreamingResponse, - AsyncSnapshotsResourceWithStreamingResponse, -) -from .floating_ips import ( - FloatingIPsResource, - AsyncFloatingIPsResource, - FloatingIPsResourceWithRawResponse, - AsyncFloatingIPsResourceWithRawResponse, - FloatingIPsResourceWithStreamingResponse, - AsyncFloatingIPsResourceWithStreamingResponse, -) -from .load_balancers import ( - LoadBalancersResource, - AsyncLoadBalancersResource, - LoadBalancersResourceWithRawResponse, - AsyncLoadBalancersResourceWithRawResponse, - 
LoadBalancersResourceWithStreamingResponse, - AsyncLoadBalancersResourceWithStreamingResponse, -) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -158,58 +86,4 @@ "AsyncModelsResourceWithRawResponse", "ModelsResourceWithStreamingResponse", "AsyncModelsResourceWithStreamingResponse", - "DropletsResource", - "AsyncDropletsResource", - "DropletsResourceWithRawResponse", - "AsyncDropletsResourceWithRawResponse", - "DropletsResourceWithStreamingResponse", - "AsyncDropletsResourceWithStreamingResponse", - "FirewallsResource", - "AsyncFirewallsResource", - "FirewallsResourceWithRawResponse", - "AsyncFirewallsResourceWithRawResponse", - "FirewallsResourceWithStreamingResponse", - "AsyncFirewallsResourceWithStreamingResponse", - "FloatingIPsResource", - "AsyncFloatingIPsResource", - "FloatingIPsResourceWithRawResponse", - "AsyncFloatingIPsResourceWithRawResponse", - "FloatingIPsResourceWithStreamingResponse", - "AsyncFloatingIPsResourceWithStreamingResponse", - "ImagesResource", - "AsyncImagesResource", - "ImagesResourceWithRawResponse", - "AsyncImagesResourceWithRawResponse", - "ImagesResourceWithStreamingResponse", - "AsyncImagesResourceWithStreamingResponse", - "LoadBalancersResource", - "AsyncLoadBalancersResource", - "LoadBalancersResourceWithRawResponse", - "AsyncLoadBalancersResourceWithRawResponse", - "LoadBalancersResourceWithStreamingResponse", - "AsyncLoadBalancersResourceWithStreamingResponse", - "SizesResource", - "AsyncSizesResource", - "SizesResourceWithRawResponse", - "AsyncSizesResourceWithRawResponse", - "SizesResourceWithStreamingResponse", - "AsyncSizesResourceWithStreamingResponse", - "SnapshotsResource", - "AsyncSnapshotsResource", - "SnapshotsResourceWithRawResponse", - "AsyncSnapshotsResourceWithRawResponse", - "SnapshotsResourceWithStreamingResponse", - "AsyncSnapshotsResourceWithStreamingResponse", - "VolumesResource", - "AsyncVolumesResource", - "VolumesResourceWithRawResponse", - 
"AsyncVolumesResourceWithRawResponse", - "VolumesResourceWithStreamingResponse", - "AsyncVolumesResourceWithStreamingResponse", - "AccountResource", - "AsyncAccountResource", - "AccountResourceWithRawResponse", - "AsyncAccountResourceWithRawResponse", - "AccountResourceWithStreamingResponse", - "AsyncAccountResourceWithStreamingResponse", ] diff --git a/src/gradientai/resources/account/__init__.py b/src/gradientai/resources/account/__init__.py deleted file mode 100644 index 33286c3f..00000000 --- a/src/gradientai/resources/account/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from .account import ( - AccountResource, - AsyncAccountResource, - AccountResourceWithRawResponse, - AsyncAccountResourceWithRawResponse, - AccountResourceWithStreamingResponse, - AsyncAccountResourceWithStreamingResponse, -) - -__all__ = [ - "KeysResource", - "AsyncKeysResource", - "KeysResourceWithRawResponse", - "AsyncKeysResourceWithRawResponse", - "KeysResourceWithStreamingResponse", - "AsyncKeysResourceWithStreamingResponse", - "AccountResource", - "AsyncAccountResource", - "AccountResourceWithRawResponse", - "AsyncAccountResourceWithRawResponse", - "AccountResourceWithStreamingResponse", - "AsyncAccountResourceWithStreamingResponse", -] diff --git a/src/gradientai/resources/account/account.py b/src/gradientai/resources/account/account.py deleted file mode 100644 index 7af8d0e1..00000000 --- a/src/gradientai/resources/account/account.py +++ /dev/null @@ -1,173 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import httpx - -from .keys import ( - KeysResource, - AsyncKeysResource, - KeysResourceWithRawResponse, - AsyncKeysResourceWithRawResponse, - KeysResourceWithStreamingResponse, - AsyncKeysResourceWithStreamingResponse, -) -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.account_retrieve_response import AccountRetrieveResponse - -__all__ = ["AccountResource", "AsyncAccountResource"] - - -class AccountResource(SyncAPIResource): - @cached_property - def keys(self) -> KeysResource: - return KeysResource(self._client) - - @cached_property - def with_raw_response(self) -> AccountResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AccountResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AccountResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AccountResourceWithStreamingResponse(self) - - def retrieve( - self, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AccountRetrieveResponse: - """ - To show information about the current user account, send a GET request to - `/v2/account`. - """ - return self._get( - "/v2/account" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AccountRetrieveResponse, - ) - - -class AsyncAccountResource(AsyncAPIResource): - @cached_property - def keys(self) -> AsyncKeysResource: - return AsyncKeysResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncAccountResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAccountResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAccountResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncAccountResourceWithStreamingResponse(self) - - async def retrieve( - self, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AccountRetrieveResponse: - """ - To show information about the current user account, send a GET request to - `/v2/account`. - """ - return await self._get( - "/v2/account" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AccountRetrieveResponse, - ) - - -class AccountResourceWithRawResponse: - def __init__(self, account: AccountResource) -> None: - self._account = account - - self.retrieve = to_raw_response_wrapper( - account.retrieve, - ) - - @cached_property - def keys(self) -> KeysResourceWithRawResponse: - return KeysResourceWithRawResponse(self._account.keys) - - -class AsyncAccountResourceWithRawResponse: - def __init__(self, account: AsyncAccountResource) -> None: - self._account = account - - self.retrieve = async_to_raw_response_wrapper( - account.retrieve, - ) - - @cached_property - def keys(self) -> AsyncKeysResourceWithRawResponse: - return AsyncKeysResourceWithRawResponse(self._account.keys) - - -class AccountResourceWithStreamingResponse: - def __init__(self, account: AccountResource) -> None: - self._account = account - - self.retrieve = to_streamed_response_wrapper( - account.retrieve, - ) - - @cached_property - def keys(self) -> KeysResourceWithStreamingResponse: - return KeysResourceWithStreamingResponse(self._account.keys) - - -class AsyncAccountResourceWithStreamingResponse: - def __init__(self, account: AsyncAccountResource) -> None: - self._account = account - - self.retrieve = async_to_streamed_response_wrapper( - account.retrieve, - ) - - @cached_property - def keys(self) -> AsyncKeysResourceWithStreamingResponse: - return AsyncKeysResourceWithStreamingResponse(self._account.keys) diff 
--git a/src/gradientai/resources/account/keys.py b/src/gradientai/resources/account/keys.py deleted file mode 100644 index 2cfd5c6a..00000000 --- a/src/gradientai/resources/account/keys.py +++ /dev/null @@ -1,588 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.account import key_list_params, key_create_params, key_update_params -from ...types.account.key_list_response import KeyListResponse -from ...types.account.key_create_response import KeyCreateResponse -from ...types.account.key_update_response import KeyUpdateResponse -from ...types.account.key_retrieve_response import KeyRetrieveResponse - -__all__ = ["KeysResource", "AsyncKeysResource"] - - -class KeysResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> KeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return KeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> KeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return KeysResourceWithStreamingResponse(self) - - def create( - self, - *, - name: str, - public_key: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: - """ - To add a new SSH public key to your DigitalOcean account, send a POST request to - `/v2/account/keys`. Set the `name` attribute to the name you wish to use and the - `public_key` attribute to the full public key you are adding. - - Args: - name: A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. - - public_key: The entire public key string that was uploaded. Embedded into the root user's - `authorized_keys` file if you include this key during Droplet creation. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/v2/account/keys" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account/keys", - body=maybe_transform( - { - "name": name, - "public_key": public_key, - }, - key_create_params.KeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyCreateResponse, - ) - - def retrieve( - self, - ssh_key_identifier: Union[int, str], - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: - """ - To get information about a key, send a GET request to `/v2/account/keys/$KEY_ID` - or `/v2/account/keys/$KEY_FINGERPRINT`. The response will be a JSON object with - the key `ssh_key` and value an ssh_key object which contains the standard - ssh_key attributes. - - Args: - ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH - key into a Droplet. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - f"/v2/account/keys/{ssh_key_identifier}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyRetrieveResponse, - ) - - def update( - self, - ssh_key_identifier: Union[int, str], - *, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: - """ - To update the name of an SSH key, send a PUT request to either - `/v2/account/keys/$SSH_KEY_ID` or `/v2/account/keys/$SSH_KEY_FINGERPRINT`. Set - the `name` attribute to the new name you want to use. - - Args: - ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH - key into a Droplet. - - name: A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._put( - f"/v2/account/keys/{ssh_key_identifier}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", - body=maybe_transform({"name": name}, key_update_params.KeyUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyUpdateResponse, - ) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: - """ - To list all of the keys in your account, send a GET request to - `/v2/account/keys`. The response will be a JSON object with a key set to - `ssh_keys`. The value of this will be an array of ssh_key objects, each of which - contains the standard ssh_key attributes. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/account/keys" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account/keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_params.KeyListParams, - ), - ), - cast_to=KeyListResponse, - ) - - def delete( - self, - ssh_key_identifier: Union[int, str], - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To destroy a public SSH key that you have in your account, send a DELETE request - to `/v2/account/keys/$KEY_ID` or `/v2/account/keys/$KEY_FINGERPRINT`. A 204 - status will be returned, indicating that the action was successful and that the - response body is empty. - - Args: - ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH - key into a Droplet. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/v2/account/keys/{ssh_key_identifier}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class AsyncKeysResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncKeysResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncKeysResourceWithStreamingResponse(self) - - async def create( - self, - *, - name: str, - public_key: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyCreateResponse: - """ - To add a new SSH public key to your DigitalOcean account, send a POST request to - `/v2/account/keys`. Set the `name` attribute to the name you wish to use and the - `public_key` attribute to the full public key you are adding. - - Args: - name: A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. - - public_key: The entire public key string that was uploaded. Embedded into the root user's - `authorized_keys` file if you include this key during Droplet creation. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/v2/account/keys" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account/keys", - body=await async_maybe_transform( - { - "name": name, - "public_key": public_key, - }, - key_create_params.KeyCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyCreateResponse, - ) - - async def retrieve( - self, - ssh_key_identifier: Union[int, str], - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyRetrieveResponse: - """ - To get information about a key, send a GET request to `/v2/account/keys/$KEY_ID` - or `/v2/account/keys/$KEY_FINGERPRINT`. The response will be a JSON object with - the key `ssh_key` and value an ssh_key object which contains the standard - ssh_key attributes. - - Args: - ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH - key into a Droplet. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - f"/v2/account/keys/{ssh_key_identifier}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyRetrieveResponse, - ) - - async def update( - self, - ssh_key_identifier: Union[int, str], - *, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyUpdateResponse: - """ - To update the name of an SSH key, send a PUT request to either - `/v2/account/keys/$SSH_KEY_ID` or `/v2/account/keys/$SSH_KEY_FINGERPRINT`. Set - the `name` attribute to the new name you want to use. 
- - Args: - ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH - key into a Droplet. - - name: A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._put( - f"/v2/account/keys/{ssh_key_identifier}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", - body=await async_maybe_transform({"name": name}, key_update_params.KeyUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=KeyUpdateResponse, - ) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> KeyListResponse: - """ - To list all of the keys in your account, send a GET request to - `/v2/account/keys`. The response will be a JSON object with a key set to - `ssh_keys`. The value of this will be an array of ssh_key objects, each of which - contains the standard ssh_key attributes. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/account/keys" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account/keys", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - key_list_params.KeyListParams, - ), - ), - cast_to=KeyListResponse, - ) - - async def delete( - self, - ssh_key_identifier: Union[int, str], - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To destroy a public SSH key that you have in your account, send a DELETE request - to `/v2/account/keys/$KEY_ID` or `/v2/account/keys/$KEY_FINGERPRINT`. A 204 - status will be returned, indicating that the action was successful and that the - response body is empty. - - Args: - ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH - key into a Droplet. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/v2/account/keys/{ssh_key_identifier}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class KeysResourceWithRawResponse: - def __init__(self, keys: KeysResource) -> None: - self._keys = keys - - self.create = to_raw_response_wrapper( - keys.create, - ) - self.retrieve = to_raw_response_wrapper( - keys.retrieve, - ) - self.update = to_raw_response_wrapper( - keys.update, - ) - self.list = to_raw_response_wrapper( - keys.list, - ) - self.delete = to_raw_response_wrapper( - keys.delete, - ) - - -class AsyncKeysResourceWithRawResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys - - self.create = async_to_raw_response_wrapper( - keys.create, - ) - self.retrieve = async_to_raw_response_wrapper( - keys.retrieve, - ) - self.update = async_to_raw_response_wrapper( - keys.update, - ) - self.list = async_to_raw_response_wrapper( - keys.list, - ) - self.delete = async_to_raw_response_wrapper( - keys.delete, - ) - - -class KeysResourceWithStreamingResponse: - def __init__(self, keys: KeysResource) -> None: - self._keys = keys - - self.create = to_streamed_response_wrapper( - keys.create, - ) - self.retrieve = to_streamed_response_wrapper( - keys.retrieve, - ) - self.update = to_streamed_response_wrapper( - keys.update, - ) - self.list = to_streamed_response_wrapper( - keys.list, - ) - self.delete = to_streamed_response_wrapper( - keys.delete, - ) - - -class 
AsyncKeysResourceWithStreamingResponse: - def __init__(self, keys: AsyncKeysResource) -> None: - self._keys = keys - - self.create = async_to_streamed_response_wrapper( - keys.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - keys.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - keys.update, - ) - self.list = async_to_streamed_response_wrapper( - keys.list, - ) - self.delete = async_to_streamed_response_wrapper( - keys.delete, - ) diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index 92d696ba..200e9fc0 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -203,29 +203,13 @@ def create( body contains a JSON object with the newly created agent object. Args: - anthropic_key_uuid: Optional Anthropic API key ID to use with Anthropic models - - description: A text description of the agent, not used in inference - instruction: Agent instruction. Instructions help your agent to perform its job effectively. See [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) for best practices. - knowledge_base_uuid: Ids of the knowledge base(s) to attach to the agent - model_uuid: Identifier for the foundation model. 
- name: Agent name - - openai_key_uuid: Optional OpenAI API key ID to use with OpenAI models - - project_id: The id of the DigitalOcean project this agent will belong to - - region: The DigitalOcean region to deploy your agent in - - tags: Agent tag to organize related resources - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -301,7 +285,6 @@ def update( path_uuid: str, *, anthropic_key_uuid: str | NotGiven = NOT_GIVEN, - conversation_logs_enabled: bool | NotGiven = NOT_GIVEN, description: str | NotGiven = NOT_GIVEN, instruction: str | NotGiven = NOT_GIVEN, k: int | NotGiven = NOT_GIVEN, @@ -329,39 +312,17 @@ def update( response body is a JSON object containing the agent. Args: - anthropic_key_uuid: Optional anthropic key uuid for use with anthropic models - - conversation_logs_enabled: Optional update of conversation logs enabled - - description: Agent description - instruction: Agent instruction. Instructions help your agent to perform its job effectively. See [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) for best practices. - k: How many results should be considered from an attached knowledge base - max_tokens: Specifies the maximum number of tokens the model can process in a single input or output, set as a number between 1 and 512. This determines the length of each response. model_uuid: Identifier for the foundation model. 
- name: Agent name - - openai_key_uuid: Optional OpenAI key uuid for use with OpenAI models - - project_id: The id of the DigitalOcean project this agent will belong to - - retrieval_method: - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown - - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite - - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back - - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries - - RETRIEVAL_METHOD_NONE: The retrieval method is none - - tags: A set of abitrary tags to organize your agent - temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower values produce more predictable and conservative responses, while higher values encourage creativity and variation. @@ -370,8 +331,6 @@ def update( number between 0 and 1. Higher values allow for more diverse outputs, while lower values ensure focused and coherent responses. - body_uuid: Unique agent id - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -389,7 +348,6 @@ def update( body=maybe_transform( { "anthropic_key_uuid": anthropic_key_uuid, - "conversation_logs_enabled": conversation_logs_enabled, "description": description, "instruction": instruction, "k": k, @@ -430,11 +388,11 @@ def list( To list all agents, send a GET request to `/v2/gen-ai/agents`. Args: - only_deployed: Only list agents that are deployed. + only_deployed: only list agents that are deployed. - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers @@ -519,17 +477,6 @@ def update_status( PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`. 
Args: - body_uuid: Unique id - - visibility: - VISIBILITY_UNKNOWN: The status of the deployment is unknown - - VISIBILITY_DISABLED: The deployment is disabled and will no longer service - requests - - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state - - VISIBILITY_PUBLIC: The deployment is public and will service requests from the - public internet - - VISIBILITY_PRIVATE: The deployment is private and will only service requests - from other agents, or through API keys - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -644,29 +591,13 @@ async def create( body contains a JSON object with the newly created agent object. Args: - anthropic_key_uuid: Optional Anthropic API key ID to use with Anthropic models - - description: A text description of the agent, not used in inference - instruction: Agent instruction. Instructions help your agent to perform its job effectively. See [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) for best practices. - knowledge_base_uuid: Ids of the knowledge base(s) to attach to the agent - model_uuid: Identifier for the foundation model. - name: Agent name - - openai_key_uuid: Optional OpenAI API key ID to use with OpenAI models - - project_id: The id of the DigitalOcean project this agent will belong to - - region: The DigitalOcean region to deploy your agent in - - tags: Agent tag to organize related resources - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -742,7 +673,6 @@ async def update( path_uuid: str, *, anthropic_key_uuid: str | NotGiven = NOT_GIVEN, - conversation_logs_enabled: bool | NotGiven = NOT_GIVEN, description: str | NotGiven = NOT_GIVEN, instruction: str | NotGiven = NOT_GIVEN, k: int | NotGiven = NOT_GIVEN, @@ -770,39 +700,17 @@ async def update( response body is a JSON object containing the agent. 
Args: - anthropic_key_uuid: Optional anthropic key uuid for use with anthropic models - - conversation_logs_enabled: Optional update of conversation logs enabled - - description: Agent description - instruction: Agent instruction. Instructions help your agent to perform its job effectively. See [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) for best practices. - k: How many results should be considered from an attached knowledge base - max_tokens: Specifies the maximum number of tokens the model can process in a single input or output, set as a number between 1 and 512. This determines the length of each response. model_uuid: Identifier for the foundation model. - name: Agent name - - openai_key_uuid: Optional OpenAI key uuid for use with OpenAI models - - project_id: The id of the DigitalOcean project this agent will belong to - - retrieval_method: - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown - - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite - - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back - - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries - - RETRIEVAL_METHOD_NONE: The retrieval method is none - - tags: A set of abitrary tags to organize your agent - temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower values produce more predictable and conservative responses, while higher values encourage creativity and variation. @@ -811,8 +719,6 @@ async def update( number between 0 and 1. Higher values allow for more diverse outputs, while lower values ensure focused and coherent responses. 
- body_uuid: Unique agent id - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -830,7 +736,6 @@ async def update( body=await async_maybe_transform( { "anthropic_key_uuid": anthropic_key_uuid, - "conversation_logs_enabled": conversation_logs_enabled, "description": description, "instruction": instruction, "k": k, @@ -871,11 +776,11 @@ async def list( To list all agents, send a GET request to `/v2/gen-ai/agents`. Args: - only_deployed: Only list agents that are deployed. + only_deployed: only list agents that are deployed. - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers @@ -960,17 +865,6 @@ async def update_status( PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`. Args: - body_uuid: Unique id - - visibility: - VISIBILITY_UNKNOWN: The status of the deployment is unknown - - VISIBILITY_DISABLED: The deployment is disabled and will no longer service - requests - - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state - - VISIBILITY_PUBLIC: The deployment is public and will service requests from the - public internet - - VISIBILITY_PRIVATE: The deployment is private and will only service requests - from other agents, or through API keys - extra_headers: Send extra headers extra_query: Add additional query parameters to the request diff --git a/src/gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py index 9f4d9660..1cf2278e 100644 --- a/src/gradientai/resources/agents/api_keys.py +++ b/src/gradientai/resources/agents/api_keys.py @@ -63,10 +63,6 @@ def create( `/v2/gen-ai/agents/{agent_uuid}/api_keys`. Args: - body_agent_uuid: Agent id - - name: A human friendly name to identify the key - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -114,12 +110,6 @@ def update( `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. 
Args: - body_agent_uuid: Agent id - - body_api_key_uuid: API key ID - - name: Name - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -168,9 +158,9 @@ def list( `/v2/gen-ai/agents/{agent_uuid}/api_keys`. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers @@ -319,10 +309,6 @@ async def create( `/v2/gen-ai/agents/{agent_uuid}/api_keys`. Args: - body_agent_uuid: Agent id - - name: A human friendly name to identify the key - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -370,12 +356,6 @@ async def update( `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. Args: - body_agent_uuid: Agent id - - body_api_key_uuid: API key ID - - name: Name - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -424,9 +404,9 @@ async def list( `/v2/gen-ai/agents/{agent_uuid}/api_keys`. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers diff --git a/src/gradientai/resources/agents/chat/completions.py b/src/gradientai/resources/agents/chat/completions.py index 96a6d843..604bffb3 100644 --- a/src/gradientai/resources/agents/chat/completions.py +++ b/src/gradientai/resources/agents/chat/completions.py @@ -66,8 +66,6 @@ def create( Optional[completion_create_params.StreamOptions] | NotGiven ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -140,19 +138,6 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. 
- tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tool and instead generates a message. `auto` means the model can - pick between generating a message or calling one or more tools. `required` means - the model must call one or more tools. Specifying a particular tool via - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools - are present. - - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. - top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -196,8 +181,6 @@ def create( Optional[completion_create_params.StreamOptions] | NotGiven ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -270,19 +253,6 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tool and instead generates a message. `auto` means the model can - pick between generating a message or calling one or more tools. `required` means - the model must call one or more tools. Specifying a particular tool via - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools - are present. - - tools: A list of tools the model may call. 
Currently, only functions are supported as a - tool. - top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -326,8 +296,6 @@ def create( Optional[completion_create_params.StreamOptions] | NotGiven ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -400,19 +368,6 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tool and instead generates a message. `auto` means the model can - pick between generating a message or calling one or more tools. `required` means - the model must call one or more tools. Specifying a particular tool via - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools - are present. - - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. - top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
@@ -459,8 +414,6 @@ def create( Optional[completion_create_params.StreamOptions] | NotGiven ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -501,8 +454,6 @@ def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, - "tool_choice": tool_choice, - "tools": tools, "top_logprobs": top_logprobs, "top_p": top_p, "user": user, @@ -565,8 +516,6 @@ async def create( Optional[completion_create_params.StreamOptions] | NotGiven ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -639,19 +588,6 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tool and instead generates a message. `auto` means the model can - pick between generating a message or calling one or more tools. `required` means - the model must call one or more tools. Specifying a particular tool via - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools - are present. - - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. 
- top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -695,8 +631,6 @@ async def create( Optional[completion_create_params.StreamOptions] | NotGiven ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -769,19 +703,6 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tool and instead generates a message. `auto` means the model can - pick between generating a message or calling one or more tools. `required` means - the model must call one or more tools. Specifying a particular tool via - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools - are present. - - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. - top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
@@ -825,8 +746,6 @@ async def create( Optional[completion_create_params.StreamOptions] | NotGiven ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -899,19 +818,6 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tool and instead generates a message. `auto` means the model can - pick between generating a message or calling one or more tools. `required` means - the model must call one or more tools. Specifying a particular tool via - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools - are present. - - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. - top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
@@ -955,8 +861,6 @@ async def create( Optional[completion_create_params.StreamOptions] | NotGiven ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -997,8 +901,6 @@ async def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, - "tool_choice": tool_choice, - "tools": tools, "top_logprobs": top_logprobs, "top_p": top_p, "user": user, diff --git a/src/gradientai/resources/agents/evaluation_metrics/__init__.py b/src/gradientai/resources/agents/evaluation_metrics/__init__.py index ce687621..1c0ec1ea 100644 --- a/src/gradientai/resources/agents/evaluation_metrics/__init__.py +++ b/src/gradientai/resources/agents/evaluation_metrics/__init__.py @@ -1,13 +1,5 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from .models import ( - ModelsResource, - AsyncModelsResource, - ModelsResourceWithRawResponse, - AsyncModelsResourceWithRawResponse, - ModelsResourceWithStreamingResponse, - AsyncModelsResourceWithStreamingResponse, -) from .workspaces import ( WorkspacesResource, AsyncWorkspacesResource, @@ -32,12 +24,6 @@ "AsyncWorkspacesResourceWithRawResponse", "WorkspacesResourceWithStreamingResponse", "AsyncWorkspacesResourceWithStreamingResponse", - "ModelsResource", - "AsyncModelsResource", - "ModelsResourceWithRawResponse", - "AsyncModelsResourceWithRawResponse", - "ModelsResourceWithStreamingResponse", - "AsyncModelsResourceWithStreamingResponse", "EvaluationMetricsResource", "AsyncEvaluationMetricsResource", "EvaluationMetricsResourceWithRawResponse", diff --git a/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py index edf708df..ce549527 100644 --- a/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py +++ b/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py @@ -4,16 +4,7 @@ import httpx -from .models import ( - ModelsResource, - AsyncModelsResource, - ModelsResourceWithRawResponse, - AsyncModelsResourceWithRawResponse, - ModelsResourceWithStreamingResponse, - AsyncModelsResourceWithStreamingResponse, -) from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import ( @@ -23,7 +14,6 @@ async_to_streamed_response_wrapper, ) from ...._base_client import make_request_options -from ....types.agents import evaluation_metric_list_regions_params from .workspaces.workspaces import ( WorkspacesResource, AsyncWorkspacesResource, @@ -33,7 +23,6 @@ AsyncWorkspacesResourceWithStreamingResponse, ) from ....types.agents.evaluation_metric_list_response import 
EvaluationMetricListResponse -from ....types.agents.evaluation_metric_list_regions_response import EvaluationMetricListRegionsResponse __all__ = ["EvaluationMetricsResource", "AsyncEvaluationMetricsResource"] @@ -43,10 +32,6 @@ class EvaluationMetricsResource(SyncAPIResource): def workspaces(self) -> WorkspacesResource: return WorkspacesResource(self._client) - @cached_property - def models(self) -> ModelsResource: - return ModelsResource(self._client) - @cached_property def with_raw_response(self) -> EvaluationMetricsResourceWithRawResponse: """ @@ -90,64 +75,12 @@ def list( cast_to=EvaluationMetricListResponse, ) - def list_regions( - self, - *, - serves_batch: bool | NotGiven = NOT_GIVEN, - serves_inference: bool | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> EvaluationMetricListRegionsResponse: - """ - To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. - - Args: - serves_batch: Include datacenters that are capable of running batch jobs. - - serves_inference: Include datacenters that serve inference. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/gen-ai/regions" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/regions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "serves_batch": serves_batch, - "serves_inference": serves_inference, - }, - evaluation_metric_list_regions_params.EvaluationMetricListRegionsParams, - ), - ), - cast_to=EvaluationMetricListRegionsResponse, - ) - class AsyncEvaluationMetricsResource(AsyncAPIResource): @cached_property def workspaces(self) -> AsyncWorkspacesResource: return AsyncWorkspacesResource(self._client) - @cached_property - def models(self) -> AsyncModelsResource: - return AsyncModelsResource(self._client) - @cached_property def with_raw_response(self) -> AsyncEvaluationMetricsResourceWithRawResponse: """ @@ -191,54 +124,6 @@ async def list( cast_to=EvaluationMetricListResponse, ) - async def list_regions( - self, - *, - serves_batch: bool | NotGiven = NOT_GIVEN, - serves_inference: bool | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> EvaluationMetricListRegionsResponse: - """ - To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. - - Args: - serves_batch: Include datacenters that are capable of running batch jobs. 
- - serves_inference: Include datacenters that serve inference. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/gen-ai/regions" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/regions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "serves_batch": serves_batch, - "serves_inference": serves_inference, - }, - evaluation_metric_list_regions_params.EvaluationMetricListRegionsParams, - ), - ), - cast_to=EvaluationMetricListRegionsResponse, - ) - class EvaluationMetricsResourceWithRawResponse: def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: @@ -247,18 +132,11 @@ def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: self.list = to_raw_response_wrapper( evaluation_metrics.list, ) - self.list_regions = to_raw_response_wrapper( - evaluation_metrics.list_regions, - ) @cached_property def workspaces(self) -> WorkspacesResourceWithRawResponse: return WorkspacesResourceWithRawResponse(self._evaluation_metrics.workspaces) - @cached_property - def models(self) -> ModelsResourceWithRawResponse: - return ModelsResourceWithRawResponse(self._evaluation_metrics.models) - class AsyncEvaluationMetricsResourceWithRawResponse: def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: @@ -267,18 +145,11 @@ def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: self.list = async_to_raw_response_wrapper( evaluation_metrics.list, ) - self.list_regions = async_to_raw_response_wrapper( - evaluation_metrics.list_regions, - ) @cached_property def workspaces(self) -> 
AsyncWorkspacesResourceWithRawResponse: return AsyncWorkspacesResourceWithRawResponse(self._evaluation_metrics.workspaces) - @cached_property - def models(self) -> AsyncModelsResourceWithRawResponse: - return AsyncModelsResourceWithRawResponse(self._evaluation_metrics.models) - class EvaluationMetricsResourceWithStreamingResponse: def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: @@ -287,18 +158,11 @@ def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: self.list = to_streamed_response_wrapper( evaluation_metrics.list, ) - self.list_regions = to_streamed_response_wrapper( - evaluation_metrics.list_regions, - ) @cached_property def workspaces(self) -> WorkspacesResourceWithStreamingResponse: return WorkspacesResourceWithStreamingResponse(self._evaluation_metrics.workspaces) - @cached_property - def models(self) -> ModelsResourceWithStreamingResponse: - return ModelsResourceWithStreamingResponse(self._evaluation_metrics.models) - class AsyncEvaluationMetricsResourceWithStreamingResponse: def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: @@ -307,14 +171,7 @@ def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: self.list = async_to_streamed_response_wrapper( evaluation_metrics.list, ) - self.list_regions = async_to_streamed_response_wrapper( - evaluation_metrics.list_regions, - ) @cached_property def workspaces(self) -> AsyncWorkspacesResourceWithStreamingResponse: return AsyncWorkspacesResourceWithStreamingResponse(self._evaluation_metrics.workspaces) - - @cached_property - def models(self) -> AsyncModelsResourceWithStreamingResponse: - return AsyncModelsResourceWithStreamingResponse(self._evaluation_metrics.models) diff --git a/src/gradientai/resources/agents/evaluation_metrics/models.py b/src/gradientai/resources/agents/evaluation_metrics/models.py deleted file mode 100644 index 20a44a22..00000000 --- a/src/gradientai/resources/agents/evaluation_metrics/models.py 
+++ /dev/null @@ -1,254 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal - -import httpx - -from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ...._utils import maybe_transform, async_maybe_transform -from ...._compat import cached_property -from ...._resource import SyncAPIResource, AsyncAPIResource -from ...._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ...._base_client import make_request_options -from ....types.agents.evaluation_metrics import model_list_params -from ....types.agents.evaluation_metrics.model_list_response import ModelListResponse - -__all__ = ["ModelsResource", "AsyncModelsResource"] - - -class ModelsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ModelsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return ModelsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return ModelsResourceWithStreamingResponse(self) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: - """ - To list all models, send a GET request to `/v2/gen-ai/models`. - - Args: - page: Page number. - - per_page: Items per page. - - public_only: Only include models that are publicly available. - - usecases: Include only models defined for the listed usecases. 
- - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/gen-ai/models" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/models", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - "public_only": public_only, - "usecases": usecases, - }, - model_list_params.ModelListParams, - ), - ), - cast_to=ModelListResponse, - ) - - -class AsyncModelsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncModelsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncModelsResourceWithStreamingResponse(self) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: - """ - To list all models, send a GET request to `/v2/gen-ai/models`. - - Args: - page: Page number. - - per_page: Items per page. - - public_only: Only include models that are publicly available. - - usecases: Include only models defined for the listed usecases. 
- - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/gen-ai/models" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/models", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - "public_only": public_only, - "usecases": usecases, - }, - model_list_params.ModelListParams, - ), - ), - cast_to=ModelListResponse, - ) - - -class ModelsResourceWithRawResponse: - def __init__(self, models: ModelsResource) -> None: - self._models = models - - self.list = to_raw_response_wrapper( - models.list, - ) - - -class AsyncModelsResourceWithRawResponse: - def __init__(self, models: AsyncModelsResource) -> None: - self._models = models - - self.list = async_to_raw_response_wrapper( - models.list, - ) - - -class ModelsResourceWithStreamingResponse: - def __init__(self, models: ModelsResource) -> None: - self._models = models - - self.list = to_streamed_response_wrapper( - models.list, - ) - - -class AsyncModelsResourceWithStreamingResponse: - def __init__(self, models: AsyncModelsResource) -> None: - self._models = models - - self.list = async_to_streamed_response_wrapper( - 
models.list, - ) diff --git a/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py b/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py index a5e68a45..1e11739f 100644 --- a/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py +++ b/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py @@ -48,6 +48,7 @@ def list( self, workspace_uuid: str, *, + field_mask: agent_list_params.FieldMask | NotGiven = NOT_GIVEN, only_deployed: bool | NotGiven = NOT_GIVEN, page: int | NotGiven = NOT_GIVEN, per_page: int | NotGiven = NOT_GIVEN, @@ -65,9 +66,9 @@ def list( Args: only_deployed: Only list agents that are deployed. - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers @@ -90,6 +91,7 @@ def list( timeout=timeout, query=maybe_transform( { + "field_mask": field_mask, "only_deployed": only_deployed, "page": page, "per_page": per_page, @@ -114,14 +116,10 @@ def move( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AgentMoveResponse: """ - To move all listed agents a given workspace, send a PUT request to + To move all listed agetns a given workspace, send a PUT request to `/v2/gen-ai/workspaces/{workspace_uuid}/agents`. Args: - agent_uuids: Agent uuids - - body_workspace_uuid: Workspace uuid to move agents to - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -176,6 +174,7 @@ async def list( self, workspace_uuid: str, *, + field_mask: agent_list_params.FieldMask | NotGiven = NOT_GIVEN, only_deployed: bool | NotGiven = NOT_GIVEN, page: int | NotGiven = NOT_GIVEN, per_page: int | NotGiven = NOT_GIVEN, @@ -193,9 +192,9 @@ async def list( Args: only_deployed: Only list agents that are deployed. - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. 
extra_headers: Send extra headers @@ -218,6 +217,7 @@ async def list( timeout=timeout, query=await async_maybe_transform( { + "field_mask": field_mask, "only_deployed": only_deployed, "page": page, "per_page": per_page, @@ -242,14 +242,10 @@ async def move( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AgentMoveResponse: """ - To move all listed agents a given workspace, send a PUT request to + To move all listed agetns a given workspace, send a PUT request to `/v2/gen-ai/workspaces/{workspace_uuid}/agents`. Args: - agent_uuids: Agent uuids - - body_workspace_uuid: Workspace uuid to move agents to - extra_headers: Send extra headers extra_query: Add additional query parameters to the request diff --git a/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py b/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py index cb213e1d..0f506118 100644 --- a/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py +++ b/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py @@ -81,12 +81,6 @@ def create( response body contains a JSON object with the newly created workspace object. Args: - agent_uuids: Ids of the agents(s) to attach to the workspace - - description: Description of the workspace - - name: Name of the workspace - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -170,10 +164,6 @@ def update( containing the workspace. Args: - description: The new description of the workspace - - name: The new name of the workspace - body_workspace_uuid: Workspace UUID. extra_headers: Send extra headers @@ -343,12 +333,6 @@ async def create( response body contains a JSON object with the newly created workspace object. 
Args: - agent_uuids: Ids of the agents(s) to attach to the workspace - - description: Description of the workspace - - name: Name of the workspace - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -432,10 +416,6 @@ async def update( containing the workspace. Args: - description: The new description of the workspace - - name: The new name of the workspace - body_workspace_uuid: Workspace UUID. extra_headers: Send extra headers diff --git a/src/gradientai/resources/agents/evaluation_runs.py b/src/gradientai/resources/agents/evaluation_runs.py index c5ea2520..47045132 100644 --- a/src/gradientai/resources/agents/evaluation_runs.py +++ b/src/gradientai/resources/agents/evaluation_runs.py @@ -17,7 +17,7 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.agents import evaluation_run_create_params, evaluation_run_list_results_params +from ...types.agents import evaluation_run_create_params from ...types.agents.evaluation_run_create_response import EvaluationRunCreateResponse from ...types.agents.evaluation_run_retrieve_response import EvaluationRunRetrieveResponse from ...types.agents.evaluation_run_list_results_response import EvaluationRunListResultsResponse @@ -68,8 +68,6 @@ def create( run_name: The name of the run. - test_case_uuid: Test-case UUID to run - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -138,8 +136,6 @@ def list_results( self, evaluation_run_uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -152,10 +148,6 @@ def list_results( `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`. 
Args: - page: Page number. - - per_page: Items per page. - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -173,17 +165,7 @@ def list_results( if self._client._base_url_overridden else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - evaluation_run_list_results_params.EvaluationRunListResultsParams, - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=EvaluationRunListResultsResponse, ) @@ -270,8 +252,6 @@ async def create( run_name: The name of the run. - test_case_uuid: Test-case UUID to run - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -340,8 +320,6 @@ async def list_results( self, evaluation_run_uuid: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -354,10 +332,6 @@ async def list_results( `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`. Args: - page: Page number. - - per_page: Items per page. 
- extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -375,17 +349,7 @@ async def list_results( if self._client._base_url_overridden else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - evaluation_run_list_results_params.EvaluationRunListResultsParams, - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=EvaluationRunListResultsResponse, ) diff --git a/src/gradientai/resources/agents/evaluation_test_cases.py b/src/gradientai/resources/agents/evaluation_test_cases.py index e33f9f91..beff8752 100644 --- a/src/gradientai/resources/agents/evaluation_test_cases.py +++ b/src/gradientai/resources/agents/evaluation_test_cases.py @@ -179,7 +179,7 @@ def update( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> EvaluationTestCaseUpdateResponse: """ - To update an evaluation test-case send a PUT request to + To update an evaluation test-case send a POST request to `/v2/gen-ai/evaluation_test_cases/{test_case_uuid}`. Args: @@ -189,8 +189,6 @@ def update( name: Name of the test case. 
- body_test_case_uuid: Test-case UUID to update - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -203,7 +201,7 @@ def update( raise ValueError( f"Expected a non-empty value for `path_test_case_uuid` but received {path_test_case_uuid!r}" ) - return self._put( + return self._post( f"/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}" if self._client._base_url_overridden else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}", @@ -441,7 +439,7 @@ async def update( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> EvaluationTestCaseUpdateResponse: """ - To update an evaluation test-case send a PUT request to + To update an evaluation test-case send a POST request to `/v2/gen-ai/evaluation_test_cases/{test_case_uuid}`. Args: @@ -451,8 +449,6 @@ async def update( name: Name of the test case. - body_test_case_uuid: Test-case UUID to update - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -465,7 +461,7 @@ async def update( raise ValueError( f"Expected a non-empty value for `path_test_case_uuid` but received {path_test_case_uuid!r}" ) - return await self._put( + return await self._post( f"/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}" if self._client._base_url_overridden else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}", diff --git a/src/gradientai/resources/agents/functions.py b/src/gradientai/resources/agents/functions.py index 1c5b2015..8c5f3f49 100644 --- a/src/gradientai/resources/agents/functions.py +++ b/src/gradientai/resources/agents/functions.py @@ -66,20 +66,6 @@ def create( `/v2/gen-ai/agents/{agent_uuid}/functions`. 
Args: - body_agent_uuid: Agent id - - description: Function description - - faas_name: The name of the function in the DigitalOcean functions platform - - faas_namespace: The namespace of the function in the DigitalOcean functions platform - - function_name: Function name - - input_schema: Describe the input schema for the function so the agent may call it - - output_schema: Describe the output schema for the function so the agent handle its response - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -137,22 +123,6 @@ def update( `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. Args: - body_agent_uuid: Agent id - - description: Funciton description - - faas_name: The name of the function in the DigitalOcean functions platform - - faas_namespace: The namespace of the function in the DigitalOcean functions platform - - function_name: Function name - - body_function_uuid: Function id - - input_schema: Describe the input schema for the function so the agent may call it - - output_schema: Describe the output schema for the function so the agent handle its response - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -271,20 +241,6 @@ async def create( `/v2/gen-ai/agents/{agent_uuid}/functions`. Args: - body_agent_uuid: Agent id - - description: Function description - - faas_name: The name of the function in the DigitalOcean functions platform - - faas_namespace: The namespace of the function in the DigitalOcean functions platform - - function_name: Function name - - input_schema: Describe the input schema for the function so the agent may call it - - output_schema: Describe the output schema for the function so the agent handle its response - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -342,22 +298,6 @@ async def update( `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. 
Args: - body_agent_uuid: Agent id - - description: Funciton description - - faas_name: The name of the function in the DigitalOcean functions platform - - faas_namespace: The namespace of the function in the DigitalOcean functions platform - - function_name: Function name - - body_function_uuid: Function id - - input_schema: Describe the input schema for the function so the agent may call it - - output_schema: Describe the output schema for the function so the agent handle its response - extra_headers: Send extra headers extra_query: Add additional query parameters to the request diff --git a/src/gradientai/resources/agents/routes.py b/src/gradientai/resources/agents/routes.py index a7a298f2..ed25d795 100644 --- a/src/gradientai/resources/agents/routes.py +++ b/src/gradientai/resources/agents/routes.py @@ -66,16 +66,8 @@ def update( `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. Args: - body_child_agent_uuid: Routed agent id - - if_case: Describes the case in which the child agent should be used - body_parent_agent_uuid: A unique identifier for the parent agent. - route_name: Route name - - uuid: Unique id of linkage - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -172,12 +164,8 @@ def add( `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. Args: - body_child_agent_uuid: Routed agent id - body_parent_agent_uuid: A unique identifier for the parent agent. - route_name: Name of route - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -292,16 +280,8 @@ async def update( `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. Args: - body_child_agent_uuid: Routed agent id - - if_case: Describes the case in which the child agent should be used - body_parent_agent_uuid: A unique identifier for the parent agent. 
- route_name: Route name - - uuid: Unique id of linkage - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -398,12 +378,8 @@ async def add( `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. Args: - body_child_agent_uuid: Routed agent id - body_parent_agent_uuid: A unique identifier for the parent agent. - route_name: Name of route - extra_headers: Send extra headers extra_query: Add additional query parameters to the request diff --git a/src/gradientai/resources/agents/versions.py b/src/gradientai/resources/agents/versions.py index 77eabea9..65a35472 100644 --- a/src/gradientai/resources/agents/versions.py +++ b/src/gradientai/resources/agents/versions.py @@ -60,10 +60,6 @@ def update( `/v2/gen-ai/agents/{uuid}/versions`. Args: - body_uuid: Agent unique identifier - - version_hash: Unique identifier - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -109,9 +105,9 @@ def list( `/v2/gen-ai/agents/{uuid}/versions`. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers @@ -182,10 +178,6 @@ async def update( `/v2/gen-ai/agents/{uuid}/versions`. Args: - body_uuid: Agent unique identifier - - version_hash: Unique identifier - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -231,9 +223,9 @@ async def list( `/v2/gen-ai/agents/{uuid}/versions`. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. 
extra_headers: Send extra headers diff --git a/src/gradientai/resources/chat/completions.py b/src/gradientai/resources/chat/completions.py index ff5c25b8..ec351ea1 100644 --- a/src/gradientai/resources/chat/completions.py +++ b/src/gradientai/resources/chat/completions.py @@ -64,8 +64,6 @@ def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -138,19 +136,6 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tool and instead generates a message. `auto` means the model can - pick between generating a message or calling one or more tools. `required` means - the model must call one or more tools. Specifying a particular tool via - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools - are present. - - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. - top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
@@ -193,8 +178,6 @@ def create( stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -267,19 +250,6 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tool and instead generates a message. `auto` means the model can - pick between generating a message or calling one or more tools. `required` means - the model must call one or more tools. Specifying a particular tool via - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools - are present. - - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. - top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
@@ -321,8 +291,6 @@ def create( stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -395,19 +363,6 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tool and instead generates a message. `auto` means the model can - pick between generating a message or calling one or more tools. `required` means - the model must call one or more tools. Specifying a particular tool via - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools - are present. - - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. - top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
@@ -449,8 +404,6 @@ def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -489,8 +442,6 @@ def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, - "tool_choice": tool_choice, - "tools": tools, "top_logprobs": top_logprobs, "top_p": top_p, "user": user, @@ -546,8 +497,6 @@ async def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -620,19 +569,6 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tool and instead generates a message. `auto` means the model can - pick between generating a message or calling one or more tools. `required` means - the model must call one or more tools. Specifying a particular tool via - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools - are present. - - tools: A list of tools the model may call. 
Currently, only functions are supported as a - tool. - top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -675,8 +611,6 @@ async def create( stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -749,19 +683,6 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tool and instead generates a message. `auto` means the model can - pick between generating a message or calling one or more tools. `required` means - the model must call one or more tools. Specifying a particular tool via - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools - are present. - - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. - top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
@@ -803,8 +724,6 @@ async def create( stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -877,19 +796,6 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - tool_choice: Controls which (if any) tool is called by the model. `none` means the model will - not call any tool and instead generates a message. `auto` means the model can - pick between generating a message or calling one or more tools. `required` means - the model must call one or more tools. Specifying a particular tool via - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools - are present. - - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. - top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
@@ -931,8 +837,6 @@ async def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, - tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -971,8 +875,6 @@ async def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, - "tool_choice": tool_choice, - "tools": tools, "top_logprobs": top_logprobs, "top_p": top_p, "user": user, diff --git a/src/gradientai/resources/droplets/__init__.py b/src/gradientai/resources/droplets/__init__.py deleted file mode 100644 index 284925dc..00000000 --- a/src/gradientai/resources/droplets/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .actions import ( - ActionsResource, - AsyncActionsResource, - ActionsResourceWithRawResponse, - AsyncActionsResourceWithRawResponse, - ActionsResourceWithStreamingResponse, - AsyncActionsResourceWithStreamingResponse, -) -from .backups import ( - BackupsResource, - AsyncBackupsResource, - BackupsResourceWithRawResponse, - AsyncBackupsResourceWithRawResponse, - BackupsResourceWithStreamingResponse, - AsyncBackupsResourceWithStreamingResponse, -) -from .droplets import ( - DropletsResource, - AsyncDropletsResource, - DropletsResourceWithRawResponse, - AsyncDropletsResourceWithRawResponse, - DropletsResourceWithStreamingResponse, - AsyncDropletsResourceWithStreamingResponse, -) -from .autoscale import ( - AutoscaleResource, - AsyncAutoscaleResource, - AutoscaleResourceWithRawResponse, - AsyncAutoscaleResourceWithRawResponse, - AutoscaleResourceWithStreamingResponse, - AsyncAutoscaleResourceWithStreamingResponse, -) -from .destroy_with_associated_resources import ( - DestroyWithAssociatedResourcesResource, - AsyncDestroyWithAssociatedResourcesResource, - DestroyWithAssociatedResourcesResourceWithRawResponse, - AsyncDestroyWithAssociatedResourcesResourceWithRawResponse, - DestroyWithAssociatedResourcesResourceWithStreamingResponse, - AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse, -) - -__all__ = [ - "BackupsResource", - "AsyncBackupsResource", - "BackupsResourceWithRawResponse", - "AsyncBackupsResourceWithRawResponse", - "BackupsResourceWithStreamingResponse", - "AsyncBackupsResourceWithStreamingResponse", - "ActionsResource", - "AsyncActionsResource", - "ActionsResourceWithRawResponse", - "AsyncActionsResourceWithRawResponse", - "ActionsResourceWithStreamingResponse", - "AsyncActionsResourceWithStreamingResponse", - "DestroyWithAssociatedResourcesResource", - "AsyncDestroyWithAssociatedResourcesResource", - "DestroyWithAssociatedResourcesResourceWithRawResponse", - "AsyncDestroyWithAssociatedResourcesResourceWithRawResponse", - 
"DestroyWithAssociatedResourcesResourceWithStreamingResponse", - "AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse", - "AutoscaleResource", - "AsyncAutoscaleResource", - "AutoscaleResourceWithRawResponse", - "AsyncAutoscaleResourceWithRawResponse", - "AutoscaleResourceWithStreamingResponse", - "AsyncAutoscaleResourceWithStreamingResponse", - "DropletsResource", - "AsyncDropletsResource", - "DropletsResourceWithRawResponse", - "AsyncDropletsResourceWithRawResponse", - "DropletsResourceWithStreamingResponse", - "AsyncDropletsResourceWithStreamingResponse", -] diff --git a/src/gradientai/resources/droplets/actions.py b/src/gradientai/resources/droplets/actions.py deleted file mode 100644 index 93d03c2d..00000000 --- a/src/gradientai/resources/droplets/actions.py +++ /dev/null @@ -1,2048 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal, overload - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import required_args, maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.droplets import action_list_params, action_initiate_params, action_bulk_initiate_params -from ...types.droplet_backup_policy_param import DropletBackupPolicyParam -from ...types.droplets.action_list_response import ActionListResponse -from ...types.droplets.action_initiate_response import ActionInitiateResponse -from ...types.droplets.action_retrieve_response import ActionRetrieveResponse -from ...types.droplets.action_bulk_initiate_response import ActionBulkInitiateResponse - -__all__ = 
["ActionsResource", "AsyncActionsResource"] - - -class ActionsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ActionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return ActionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ActionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return ActionsResourceWithStreamingResponse(self) - - def retrieve( - self, - action_id: int, - *, - droplet_id: int, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionRetrieveResponse: - """ - To retrieve a Droplet action, send a GET request to - `/v2/droplets/$DROPLET_ID/actions/$ACTION_ID`. - - The response will be a JSON object with a key called `action`. The value will be - a Droplet action object. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - f"/v2/droplets/{droplet_id}/actions/{action_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions/{action_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ActionRetrieveResponse, - ) - - def list( - self, - droplet_id: int, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionListResponse: - """ - To retrieve a list of all actions that have been executed for a Droplet, send a - GET request to `/v2/droplets/$DROPLET_ID/actions`. - - The results will be returned as a JSON object with an `actions` key. This will - be set to an array filled with `action` objects containing the standard `action` - attributes. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - f"/v2/droplets/{droplet_id}/actions" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - action_list_params.ActionListParams, - ), - ), - cast_to=ActionListResponse, - ) - - @overload - def bulk_initiate( - self, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - tag_name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionBulkInitiateResponse: - """Some actions can be performed in bulk on tagged Droplets. - - The actions can be - initiated by sending a POST to `/v2/droplets/actions?tag_name=$TAG_NAME` with - the action arguments. 
- - Only a sub-set of action types are supported: - - - `power_cycle` - - `power_on` - - `power_off` - - `shutdown` - - `enable_ipv6` - - `enable_backups` - - `disable_backups` - - `snapshot` (also requires `image:create` permission) - - Args: - type: The type of action to initiate for the Droplet. - - tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or - `type`. Requires `tag:read` scope. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - def bulk_initiate( - self, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - tag_name: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionBulkInitiateResponse: - """Some actions can be performed in bulk on tagged Droplets. - - The actions can be - initiated by sending a POST to `/v2/droplets/actions?tag_name=$TAG_NAME` with - the action arguments. - - Only a sub-set of action types are supported: - - - `power_cycle` - - `power_on` - - `power_off` - - `shutdown` - - `enable_ipv6` - - `enable_backups` - - `disable_backups` - - `snapshot` (also requires `image:create` permission) - - Args: - type: The type of action to initiate for the Droplet. 
- - tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or - `type`. Requires `tag:read` scope. - - name: The name to give the new snapshot of the Droplet. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["type"]) - def bulk_initiate( - self, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - tag_name: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionBulkInitiateResponse: - return self._post( - "/v2/droplets/actions" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/droplets/actions", - body=maybe_transform( - { - "type": type, - "name": name, - }, - action_bulk_initiate_params.ActionBulkInitiateParams, - ), - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform({"tag_name": tag_name}, action_bulk_initiate_params.ActionBulkInitiateParams), - ), - cast_to=ActionBulkInitiateResponse, - ) - - @overload - def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @overload - def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. 
A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. 
When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - backup_policy: An object specifying the backup policy for the Droplet. If omitted, the backup - plan will default to daily. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - backup_policy: An object specifying the backup policy for the Droplet. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @overload - def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - image: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. 
A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. 
When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - image: The ID of a backup of the current Droplet instance to restore from. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - disk: bool | NotGiven = NOT_GIVEN, - size: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - disk: When `true`, the Droplet's disk will be resized in addition to its RAM and CPU. - This is a permanent change and cannot be reversed as a Droplet's disk size - cannot be decreased. - - size: The slug identifier for the size to which you wish to resize the Droplet. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @overload - def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - image: Union[str, int] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. 
A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. 
When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - image: The image ID of a public or private image or the slug identifier for a public - image. The Droplet will be rebuilt using this image as its base. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - name: The new name for the Droplet. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @overload - def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - kernel: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. 
A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. 
When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - kernel: A unique number used to identify and reference a specific kernel. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - name: The name to give the new snapshot of the Droplet. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @required_args(["type"]) - def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - image: int | Union[str, int] | NotGiven = NOT_GIVEN, - disk: bool | NotGiven = NOT_GIVEN, - size: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - kernel: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - return self._post( - f"/v2/droplets/{droplet_id}/actions" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions", - body=maybe_transform( - { - "type": type, - "backup_policy": backup_policy, - "image": image, - "disk": disk, - "size": size, - "name": name, - "kernel": kernel, - }, - action_initiate_params.ActionInitiateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ActionInitiateResponse, - ) - - -class AsyncActionsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncActionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncActionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncActionsResourceWithStreamingResponse(self) - - async def retrieve( - self, - action_id: int, - *, - droplet_id: int, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionRetrieveResponse: - """ - To retrieve a Droplet action, send a GET request to - `/v2/droplets/$DROPLET_ID/actions/$ACTION_ID`. - - The response will be a JSON object with a key called `action`. The value will be - a Droplet action object. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - f"/v2/droplets/{droplet_id}/actions/{action_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions/{action_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ActionRetrieveResponse, - ) - - async def list( - self, - droplet_id: int, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionListResponse: - """ - To retrieve a list of all actions that have been executed for a Droplet, send a - GET request to `/v2/droplets/$DROPLET_ID/actions`. - - The results will be returned as a JSON object with an `actions` key. This will - be set to an array filled with `action` objects containing the standard `action` - attributes. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - f"/v2/droplets/{droplet_id}/actions" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - action_list_params.ActionListParams, - ), - ), - cast_to=ActionListResponse, - ) - - @overload - async def bulk_initiate( - self, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - tag_name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionBulkInitiateResponse: - """Some actions can be performed in bulk on tagged Droplets. - - The actions can be - initiated by sending a POST to `/v2/droplets/actions?tag_name=$TAG_NAME` with - the action arguments. 
- - Only a sub-set of action types are supported: - - - `power_cycle` - - `power_on` - - `power_off` - - `shutdown` - - `enable_ipv6` - - `enable_backups` - - `disable_backups` - - `snapshot` (also requires `image:create` permission) - - Args: - type: The type of action to initiate for the Droplet. - - tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or - `type`. Requires `tag:read` scope. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - async def bulk_initiate( - self, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - tag_name: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionBulkInitiateResponse: - """Some actions can be performed in bulk on tagged Droplets. - - The actions can be - initiated by sending a POST to `/v2/droplets/actions?tag_name=$TAG_NAME` with - the action arguments. - - Only a sub-set of action types are supported: - - - `power_cycle` - - `power_on` - - `power_off` - - `shutdown` - - `enable_ipv6` - - `enable_backups` - - `disable_backups` - - `snapshot` (also requires `image:create` permission) - - Args: - type: The type of action to initiate for the Droplet. 
- - tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or - `type`. Requires `tag:read` scope. - - name: The name to give the new snapshot of the Droplet. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["type"]) - async def bulk_initiate( - self, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - tag_name: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionBulkInitiateResponse: - return await self._post( - "/v2/droplets/actions" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/droplets/actions", - body=await async_maybe_transform( - { - "type": type, - "name": name, - }, - action_bulk_initiate_params.ActionBulkInitiateParams, - ), - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - {"tag_name": tag_name}, action_bulk_initiate_params.ActionBulkInitiateParams - ), - ), - cast_to=ActionBulkInitiateResponse, - ) - - @overload - async def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @overload - async def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. 
A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. 
When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - backup_policy: An object specifying the backup policy for the Droplet. If omitted, the backup - plan will default to daily. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - async def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - backup_policy: An object specifying the backup policy for the Droplet. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @overload - async def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - image: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. 
A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. 
When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - image: The ID of a backup of the current Droplet instance to restore from. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - async def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - disk: bool | NotGiven = NOT_GIVEN, - size: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - disk: When `true`, the Droplet's disk will be resized in addition to its RAM and CPU. - This is a permanent change and cannot be reversed as a Droplet's disk size - cannot be decreased. - - size: The slug identifier for the size to which you wish to resize the Droplet. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @overload - async def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - image: Union[str, int] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. 
A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. 
When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - image: The image ID of a public or private image or the slug identifier for a public - image. The Droplet will be rebuilt using this image as its base. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - async def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - name: The new name for the Droplet. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @overload - async def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - kernel: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. 
A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. 
When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - kernel: A unique number used to identify and reference a specific kernel. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - async def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - """ - To initiate an action on a Droplet send a POST request to - `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the - `type` attribute to on of the supported action types: - - | Action | Details | Additionally Required Permission | - | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | - | `enable_backups` | Enables backups for a Droplet | | - | `disable_backups` | Disables backups for a Droplet | | - | `change_backup_policy` | Update the backup policy for a Droplet | | - | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | - | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | - | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | - | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | - | `power_on` | Powers on a Droplet. | | - | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | - | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | - | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | - | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | - | `rename` | Renames a Droplet. | | - | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | - | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | - | `snapshot` | Takes a snapshot of a Droplet. | image:create | - - Args: - type: The type of action to initiate for the Droplet. - - name: The name to give the new snapshot of the Droplet. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @required_args(["type"]) - async def initiate( - self, - droplet_id: int, - *, - type: Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ], - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - image: int | Union[str, int] | NotGiven = NOT_GIVEN, - disk: bool | NotGiven = NOT_GIVEN, - size: str | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - kernel: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateResponse: - return await self._post( - f"/v2/droplets/{droplet_id}/actions" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions", - body=await async_maybe_transform( - { - "type": type, - "backup_policy": backup_policy, - "image": image, - "disk": disk, - "size": size, - "name": name, - "kernel": kernel, - }, - action_initiate_params.ActionInitiateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ActionInitiateResponse, - ) - - -class ActionsResourceWithRawResponse: - def __init__(self, actions: ActionsResource) -> None: - self._actions = actions - - self.retrieve = to_raw_response_wrapper( - actions.retrieve, - ) - self.list = to_raw_response_wrapper( - actions.list, - ) - self.bulk_initiate = to_raw_response_wrapper( - actions.bulk_initiate, - ) - self.initiate = to_raw_response_wrapper( - 
actions.initiate, - ) - - -class AsyncActionsResourceWithRawResponse: - def __init__(self, actions: AsyncActionsResource) -> None: - self._actions = actions - - self.retrieve = async_to_raw_response_wrapper( - actions.retrieve, - ) - self.list = async_to_raw_response_wrapper( - actions.list, - ) - self.bulk_initiate = async_to_raw_response_wrapper( - actions.bulk_initiate, - ) - self.initiate = async_to_raw_response_wrapper( - actions.initiate, - ) - - -class ActionsResourceWithStreamingResponse: - def __init__(self, actions: ActionsResource) -> None: - self._actions = actions - - self.retrieve = to_streamed_response_wrapper( - actions.retrieve, - ) - self.list = to_streamed_response_wrapper( - actions.list, - ) - self.bulk_initiate = to_streamed_response_wrapper( - actions.bulk_initiate, - ) - self.initiate = to_streamed_response_wrapper( - actions.initiate, - ) - - -class AsyncActionsResourceWithStreamingResponse: - def __init__(self, actions: AsyncActionsResource) -> None: - self._actions = actions - - self.retrieve = async_to_streamed_response_wrapper( - actions.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - actions.list, - ) - self.bulk_initiate = async_to_streamed_response_wrapper( - actions.bulk_initiate, - ) - self.initiate = async_to_streamed_response_wrapper( - actions.initiate, - ) diff --git a/src/gradientai/resources/droplets/autoscale.py b/src/gradientai/resources/droplets/autoscale.py deleted file mode 100644 index 7522385f..00000000 --- a/src/gradientai/resources/droplets/autoscale.py +++ /dev/null @@ -1,967 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.droplets import ( - autoscale_list_params, - autoscale_create_params, - autoscale_update_params, - autoscale_list_history_params, - autoscale_list_members_params, -) -from ...types.droplets.autoscale_list_response import AutoscaleListResponse -from ...types.droplets.autoscale_create_response import AutoscaleCreateResponse -from ...types.droplets.autoscale_update_response import AutoscaleUpdateResponse -from ...types.droplets.autoscale_retrieve_response import AutoscaleRetrieveResponse -from ...types.droplets.autoscale_list_history_response import AutoscaleListHistoryResponse -from ...types.droplets.autoscale_list_members_response import AutoscaleListMembersResponse -from ...types.droplets.autoscale_pool_droplet_template_param import AutoscalePoolDropletTemplateParam - -__all__ = ["AutoscaleResource", "AsyncAutoscaleResource"] - - -class AutoscaleResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> AutoscaleResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AutoscaleResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AutoscaleResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AutoscaleResourceWithStreamingResponse(self) - - def create( - self, - *, - config: autoscale_create_params.Config, - droplet_template: AutoscalePoolDropletTemplateParam, - name: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AutoscaleCreateResponse: - """ - To create a new autoscale pool, send a POST request to `/v2/droplets/autoscale` - setting the required attributes. - - The response body will contain a JSON object with a key called `autoscale_pool` - containing the standard attributes for the new autoscale pool. - - Args: - config: The scaling configuration for an autoscale pool, which is how the pool scales up - and down (either by resource utilization or static configuration). - - name: The human-readable name of the autoscale pool. 
This field cannot be updated - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/v2/droplets/autoscale" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/droplets/autoscale", - body=maybe_transform( - { - "config": config, - "droplet_template": droplet_template, - "name": name, - }, - autoscale_create_params.AutoscaleCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AutoscaleCreateResponse, - ) - - def retrieve( - self, - autoscale_pool_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AutoscaleRetrieveResponse: - """ - To show information about an individual autoscale pool, send a GET request to - `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not autoscale_pool_id: - raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") - return self._get( - f"/v2/droplets/autoscale/{autoscale_pool_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AutoscaleRetrieveResponse, - ) - - def update( - self, - autoscale_pool_id: str, - *, - config: autoscale_update_params.Config, - droplet_template: AutoscalePoolDropletTemplateParam, - name: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AutoscaleUpdateResponse: - """ - To update the configuration of an existing autoscale pool, send a PUT request to - `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID`. The request must contain a full - representation of the autoscale pool including existing attributes. - - Args: - config: The scaling configuration for an autoscale pool, which is how the pool scales up - and down (either by resource utilization or static configuration). - - name: The human-readable name of the autoscale pool. 
This field cannot be updated - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not autoscale_pool_id: - raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") - return self._put( - f"/v2/droplets/autoscale/{autoscale_pool_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", - body=maybe_transform( - { - "config": config, - "droplet_template": droplet_template, - "name": name, - }, - autoscale_update_params.AutoscaleUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AutoscaleUpdateResponse, - ) - - def list( - self, - *, - name: str | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AutoscaleListResponse: - """ - To list all autoscale pools in your team, send a GET request to - `/v2/droplets/autoscale`. The response body will be a JSON object with a key of - `autoscale_pools` containing an array of autoscale pool objects. These each - contain the standard autoscale pool attributes. - - Args: - name: The name of the autoscale pool - - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/droplets/autoscale" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/droplets/autoscale", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "name": name, - "page": page, - "per_page": per_page, - }, - autoscale_list_params.AutoscaleListParams, - ), - ), - cast_to=AutoscaleListResponse, - ) - - def delete( - self, - autoscale_pool_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To destroy an autoscale pool, send a DELETE request to the - `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID` endpoint. - - A successful response will include a 202 response code and no content. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not autoscale_pool_id: - raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/v2/droplets/autoscale/{autoscale_pool_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - def delete_dangerous( - self, - autoscale_pool_id: str, - *, - x_dangerous: bool, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To destroy an autoscale pool and its associated resources (Droplets), send a - DELETE request to the `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/dangerous` - endpoint. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not autoscale_pool_id: - raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - extra_headers.update({"X-Dangerous": ("true" if x_dangerous else "false")}) - return self._delete( - f"/v2/droplets/autoscale/{autoscale_pool_id}/dangerous" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/dangerous", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - def list_history( - self, - autoscale_pool_id: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AutoscaleListHistoryResponse: - """ - To list all of the scaling history events of an autoscale pool, send a GET - request to `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/history`. - - The response body will be a JSON object with a key of `history`. This will be - set to an array containing objects each representing a history event. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not autoscale_pool_id: - raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") - return self._get( - f"/v2/droplets/autoscale/{autoscale_pool_id}/history" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/history", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - autoscale_list_history_params.AutoscaleListHistoryParams, - ), - ), - cast_to=AutoscaleListHistoryResponse, - ) - - def list_members( - self, - autoscale_pool_id: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AutoscaleListMembersResponse: - """ - To list the Droplets in an autoscale pool, send a GET request to - `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/members`. - - The response body will be a JSON object with a key of `droplets`. This will be - set to an array containing information about each of the Droplets in the - autoscale pool. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not autoscale_pool_id: - raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") - return self._get( - f"/v2/droplets/autoscale/{autoscale_pool_id}/members" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/members", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - autoscale_list_members_params.AutoscaleListMembersParams, - ), - ), - cast_to=AutoscaleListMembersResponse, - ) - - -class AsyncAutoscaleResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncAutoscaleResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncAutoscaleResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncAutoscaleResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncAutoscaleResourceWithStreamingResponse(self) - - async def create( - self, - *, - config: autoscale_create_params.Config, - droplet_template: AutoscalePoolDropletTemplateParam, - name: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AutoscaleCreateResponse: - """ - To create a new autoscale pool, send a POST request to `/v2/droplets/autoscale` - setting the required attributes. - - The response body will contain a JSON object with a key called `autoscale_pool` - containing the standard attributes for the new autoscale pool. - - Args: - config: The scaling configuration for an autoscale pool, which is how the pool scales up - and down (either by resource utilization or static configuration). - - name: The human-readable name of the autoscale pool. 
This field cannot be updated - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/v2/droplets/autoscale" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/droplets/autoscale", - body=await async_maybe_transform( - { - "config": config, - "droplet_template": droplet_template, - "name": name, - }, - autoscale_create_params.AutoscaleCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AutoscaleCreateResponse, - ) - - async def retrieve( - self, - autoscale_pool_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AutoscaleRetrieveResponse: - """ - To show information about an individual autoscale pool, send a GET request to - `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not autoscale_pool_id: - raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") - return await self._get( - f"/v2/droplets/autoscale/{autoscale_pool_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AutoscaleRetrieveResponse, - ) - - async def update( - self, - autoscale_pool_id: str, - *, - config: autoscale_update_params.Config, - droplet_template: AutoscalePoolDropletTemplateParam, - name: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AutoscaleUpdateResponse: - """ - To update the configuration of an existing autoscale pool, send a PUT request to - `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID`. The request must contain a full - representation of the autoscale pool including existing attributes. - - Args: - config: The scaling configuration for an autoscale pool, which is how the pool scales up - and down (either by resource utilization or static configuration). - - name: The human-readable name of the autoscale pool. 
This field cannot be updated - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not autoscale_pool_id: - raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") - return await self._put( - f"/v2/droplets/autoscale/{autoscale_pool_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", - body=await async_maybe_transform( - { - "config": config, - "droplet_template": droplet_template, - "name": name, - }, - autoscale_update_params.AutoscaleUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=AutoscaleUpdateResponse, - ) - - async def list( - self, - *, - name: str | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AutoscaleListResponse: - """ - To list all autoscale pools in your team, send a GET request to - `/v2/droplets/autoscale`. The response body will be a JSON object with a key of - `autoscale_pools` containing an array of autoscale pool objects. These each - contain the standard autoscale pool attributes. - - Args: - name: The name of the autoscale pool - - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/droplets/autoscale" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/droplets/autoscale", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "name": name, - "page": page, - "per_page": per_page, - }, - autoscale_list_params.AutoscaleListParams, - ), - ), - cast_to=AutoscaleListResponse, - ) - - async def delete( - self, - autoscale_pool_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To destroy an autoscale pool, send a DELETE request to the - `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID` endpoint. - - A successful response will include a 202 response code and no content. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not autoscale_pool_id: - raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/v2/droplets/autoscale/{autoscale_pool_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - async def delete_dangerous( - self, - autoscale_pool_id: str, - *, - x_dangerous: bool, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To destroy an autoscale pool and its associated resources (Droplets), send a - DELETE request to the `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/dangerous` - endpoint. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not autoscale_pool_id: - raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - extra_headers.update({"X-Dangerous": ("true" if x_dangerous else "false")}) - return await self._delete( - f"/v2/droplets/autoscale/{autoscale_pool_id}/dangerous" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/dangerous", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - async def list_history( - self, - autoscale_pool_id: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AutoscaleListHistoryResponse: - """ - To list all of the scaling history events of an autoscale pool, send a GET - request to `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/history`. - - The response body will be a JSON object with a key of `history`. This will be - set to an array containing objects each representing a history event. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not autoscale_pool_id: - raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") - return await self._get( - f"/v2/droplets/autoscale/{autoscale_pool_id}/history" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/history", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - autoscale_list_history_params.AutoscaleListHistoryParams, - ), - ), - cast_to=AutoscaleListHistoryResponse, - ) - - async def list_members( - self, - autoscale_pool_id: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> AutoscaleListMembersResponse: - """ - To list the Droplets in an autoscale pool, send a GET request to - `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/members`. - - The response body will be a JSON object with a key of `droplets`. This will be - set to an array containing information about each of the Droplets in the - autoscale pool. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not autoscale_pool_id: - raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") - return await self._get( - f"/v2/droplets/autoscale/{autoscale_pool_id}/members" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/members", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - autoscale_list_members_params.AutoscaleListMembersParams, - ), - ), - cast_to=AutoscaleListMembersResponse, - ) - - -class AutoscaleResourceWithRawResponse: - def __init__(self, autoscale: AutoscaleResource) -> None: - self._autoscale = autoscale - - self.create = to_raw_response_wrapper( - autoscale.create, - ) - self.retrieve = to_raw_response_wrapper( - autoscale.retrieve, - ) - self.update = to_raw_response_wrapper( - autoscale.update, - ) - self.list = to_raw_response_wrapper( - autoscale.list, - ) - self.delete = to_raw_response_wrapper( - autoscale.delete, - ) - self.delete_dangerous = to_raw_response_wrapper( - autoscale.delete_dangerous, - ) - self.list_history = to_raw_response_wrapper( - autoscale.list_history, - ) - self.list_members = to_raw_response_wrapper( - autoscale.list_members, - ) - - -class AsyncAutoscaleResourceWithRawResponse: - def __init__(self, autoscale: AsyncAutoscaleResource) -> None: - self._autoscale = autoscale - - self.create = async_to_raw_response_wrapper( - autoscale.create, - ) - self.retrieve = async_to_raw_response_wrapper( - autoscale.retrieve, - ) - self.update = 
async_to_raw_response_wrapper( - autoscale.update, - ) - self.list = async_to_raw_response_wrapper( - autoscale.list, - ) - self.delete = async_to_raw_response_wrapper( - autoscale.delete, - ) - self.delete_dangerous = async_to_raw_response_wrapper( - autoscale.delete_dangerous, - ) - self.list_history = async_to_raw_response_wrapper( - autoscale.list_history, - ) - self.list_members = async_to_raw_response_wrapper( - autoscale.list_members, - ) - - -class AutoscaleResourceWithStreamingResponse: - def __init__(self, autoscale: AutoscaleResource) -> None: - self._autoscale = autoscale - - self.create = to_streamed_response_wrapper( - autoscale.create, - ) - self.retrieve = to_streamed_response_wrapper( - autoscale.retrieve, - ) - self.update = to_streamed_response_wrapper( - autoscale.update, - ) - self.list = to_streamed_response_wrapper( - autoscale.list, - ) - self.delete = to_streamed_response_wrapper( - autoscale.delete, - ) - self.delete_dangerous = to_streamed_response_wrapper( - autoscale.delete_dangerous, - ) - self.list_history = to_streamed_response_wrapper( - autoscale.list_history, - ) - self.list_members = to_streamed_response_wrapper( - autoscale.list_members, - ) - - -class AsyncAutoscaleResourceWithStreamingResponse: - def __init__(self, autoscale: AsyncAutoscaleResource) -> None: - self._autoscale = autoscale - - self.create = async_to_streamed_response_wrapper( - autoscale.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - autoscale.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - autoscale.update, - ) - self.list = async_to_streamed_response_wrapper( - autoscale.list, - ) - self.delete = async_to_streamed_response_wrapper( - autoscale.delete, - ) - self.delete_dangerous = async_to_streamed_response_wrapper( - autoscale.delete_dangerous, - ) - self.list_history = async_to_streamed_response_wrapper( - autoscale.list_history, - ) - self.list_members = async_to_streamed_response_wrapper( - autoscale.list_members, - 
) diff --git a/src/gradientai/resources/droplets/backups.py b/src/gradientai/resources/droplets/backups.py deleted file mode 100644 index d8635c46..00000000 --- a/src/gradientai/resources/droplets/backups.py +++ /dev/null @@ -1,460 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.droplets import backup_list_params, backup_list_policies_params -from ...types.droplets.backup_list_response import BackupListResponse -from ...types.droplets.backup_list_policies_response import BackupListPoliciesResponse -from ...types.droplets.backup_retrieve_policy_response import BackupRetrievePolicyResponse -from ...types.droplets.backup_list_supported_policies_response import BackupListSupportedPoliciesResponse - -__all__ = ["BackupsResource", "AsyncBackupsResource"] - - -class BackupsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> BackupsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return BackupsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> BackupsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return BackupsResourceWithStreamingResponse(self) - - def list( - self, - droplet_id: int, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> BackupListResponse: - """ - To retrieve any backups associated with a Droplet, send a GET request to - `/v2/droplets/$DROPLET_ID/backups`. - - You will get back a JSON object that has a `backups` key. This will be set to an - array of backup objects, each of which contain the standard Droplet backup - attributes. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - f"/v2/droplets/{droplet_id}/backups" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/backups", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - backup_list_params.BackupListParams, - ), - ), - cast_to=BackupListResponse, - ) - - def list_policies( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> BackupListPoliciesResponse: - """ - To list information about the backup policies for all Droplets in the account, - send a GET request to `/v2/droplets/backups/policies`. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/droplets/backups/policies" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/droplets/backups/policies", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - backup_list_policies_params.BackupListPoliciesParams, - ), - ), - cast_to=BackupListPoliciesResponse, - ) - - def list_supported_policies( - self, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> BackupListSupportedPoliciesResponse: - """ - To retrieve a list of all supported Droplet backup policies, send a GET request - to `/v2/droplets/backups/supported_policies`. - """ - return self._get( - "/v2/droplets/backups/supported_policies" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/droplets/backups/supported_policies", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=BackupListSupportedPoliciesResponse, - ) - - def retrieve_policy( - self, - droplet_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> BackupRetrievePolicyResponse: - """ - To show information about an individual Droplet's backup policy, send a GET - request to `/v2/droplets/$DROPLET_ID/backups/policy`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - f"/v2/droplets/{droplet_id}/backups/policy" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/backups/policy", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=BackupRetrievePolicyResponse, - ) - - -class AsyncBackupsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncBackupsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncBackupsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncBackupsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncBackupsResourceWithStreamingResponse(self) - - async def list( - self, - droplet_id: int, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> BackupListResponse: - """ - To retrieve any backups associated with a Droplet, send a GET request to - `/v2/droplets/$DROPLET_ID/backups`. - - You will get back a JSON object that has a `backups` key. This will be set to an - array of backup objects, each of which contain the standard Droplet backup - attributes. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - f"/v2/droplets/{droplet_id}/backups" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/backups", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - backup_list_params.BackupListParams, - ), - ), - cast_to=BackupListResponse, - ) - - async def list_policies( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> BackupListPoliciesResponse: - """ - To list information about the backup policies for all Droplets in the account, - send a GET request to `/v2/droplets/backups/policies`. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/droplets/backups/policies" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/droplets/backups/policies", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - backup_list_policies_params.BackupListPoliciesParams, - ), - ), - cast_to=BackupListPoliciesResponse, - ) - - async def list_supported_policies( - self, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> BackupListSupportedPoliciesResponse: - """ - To retrieve a list of all supported Droplet backup policies, send a GET request - to `/v2/droplets/backups/supported_policies`. - """ - return await self._get( - "/v2/droplets/backups/supported_policies" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/droplets/backups/supported_policies", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=BackupListSupportedPoliciesResponse, - ) - - async def retrieve_policy( - self, - droplet_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> BackupRetrievePolicyResponse: - """ - To show information about an individual Droplet's backup policy, send a GET - request to `/v2/droplets/$DROPLET_ID/backups/policy`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - f"/v2/droplets/{droplet_id}/backups/policy" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/backups/policy", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=BackupRetrievePolicyResponse, - ) - - -class BackupsResourceWithRawResponse: - def __init__(self, backups: BackupsResource) -> None: - self._backups = backups - - self.list = to_raw_response_wrapper( - backups.list, - ) - self.list_policies = to_raw_response_wrapper( - backups.list_policies, - ) - self.list_supported_policies = to_raw_response_wrapper( - backups.list_supported_policies, - ) - self.retrieve_policy = to_raw_response_wrapper( - backups.retrieve_policy, - ) - - -class AsyncBackupsResourceWithRawResponse: - def __init__(self, backups: AsyncBackupsResource) -> None: - self._backups = backups - - self.list = async_to_raw_response_wrapper( - backups.list, - ) - self.list_policies = async_to_raw_response_wrapper( - backups.list_policies, - ) - self.list_supported_policies = async_to_raw_response_wrapper( - backups.list_supported_policies, - ) - self.retrieve_policy = async_to_raw_response_wrapper( - backups.retrieve_policy, 
- ) - - -class BackupsResourceWithStreamingResponse: - def __init__(self, backups: BackupsResource) -> None: - self._backups = backups - - self.list = to_streamed_response_wrapper( - backups.list, - ) - self.list_policies = to_streamed_response_wrapper( - backups.list_policies, - ) - self.list_supported_policies = to_streamed_response_wrapper( - backups.list_supported_policies, - ) - self.retrieve_policy = to_streamed_response_wrapper( - backups.retrieve_policy, - ) - - -class AsyncBackupsResourceWithStreamingResponse: - def __init__(self, backups: AsyncBackupsResource) -> None: - self._backups = backups - - self.list = async_to_streamed_response_wrapper( - backups.list, - ) - self.list_policies = async_to_streamed_response_wrapper( - backups.list_policies, - ) - self.list_supported_policies = async_to_streamed_response_wrapper( - backups.list_supported_policies, - ) - self.retrieve_policy = async_to_streamed_response_wrapper( - backups.retrieve_policy, - ) diff --git a/src/gradientai/resources/droplets/destroy_with_associated_resources.py b/src/gradientai/resources/droplets/destroy_with_associated_resources.py deleted file mode 100644 index 96cc0615..00000000 --- a/src/gradientai/resources/droplets/destroy_with_associated_resources.py +++ /dev/null @@ -1,622 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import List - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.droplets import destroy_with_associated_resource_delete_selective_params -from ...types.droplets.destroy_with_associated_resource_list_response import DestroyWithAssociatedResourceListResponse -from ...types.droplets.destroy_with_associated_resource_check_status_response import ( - DestroyWithAssociatedResourceCheckStatusResponse, -) - -__all__ = ["DestroyWithAssociatedResourcesResource", "AsyncDestroyWithAssociatedResourcesResource"] - - -class DestroyWithAssociatedResourcesResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> DestroyWithAssociatedResourcesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return DestroyWithAssociatedResourcesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> DestroyWithAssociatedResourcesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return DestroyWithAssociatedResourcesResourceWithStreamingResponse(self) - - def list( - self, - droplet_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DestroyWithAssociatedResourceListResponse: - """ - To list the associated billable resources that can be destroyed along with a - Droplet, send a GET request to the - `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources` endpoint. - - This endpoint will only return resources that you are authorized to see. For - example, to see associated Reserved IPs, include the `reserved_ip:read` scope. - - The response will be a JSON object containing `snapshots`, `volumes`, and - `volume_snapshots` keys. Each will be set to an array of objects containing - information about the associated resources. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - f"/v2/droplets/{droplet_id}/destroy_with_associated_resources" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DestroyWithAssociatedResourceListResponse, - ) - - def check_status( - self, - droplet_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DestroyWithAssociatedResourceCheckStatusResponse: - """ - To check on the status of a request to destroy a Droplet with its associated - resources, send a GET request to the - `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/status` endpoint. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/status" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/status", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DestroyWithAssociatedResourceCheckStatusResponse, - ) - - def delete_dangerous( - self, - droplet_id: int, - *, - x_dangerous: bool, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To destroy a Droplet along with all of its associated resources, send a DELETE - request to the - `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/dangerous` endpoint. - The headers of this request must include an `X-Dangerous` key set to `true`. To - preview which resources will be destroyed, first query the Droplet's associated - resources. This operation _can not_ be reverse and should be used with caution. - - A successful response will include a 202 response code and no content. Use the - status endpoint to check on the success or failure of the destruction of the - individual resources. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - extra_headers.update({"X-Dangerous": ("true" if x_dangerous else "false")}) - return self._delete( - f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/dangerous" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/dangerous", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - def delete_selective( - self, - droplet_id: int, - *, - floating_ips: List[str] | NotGiven = NOT_GIVEN, - reserved_ips: List[str] | NotGiven = NOT_GIVEN, - snapshots: List[str] | NotGiven = NOT_GIVEN, - volume_snapshots: List[str] | NotGiven = NOT_GIVEN, - volumes: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To destroy a Droplet along with a sub-set of its associated resources, send a - DELETE request to the - `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/selective` endpoint. - The JSON body of the request should include `reserved_ips`, `snapshots`, - `volumes`, or `volume_snapshots` keys each set to an array of IDs for the - associated resources to be destroyed. The IDs can be found by querying the - Droplet's associated resources. 
Any associated resource not included in the - request will remain and continue to accrue changes on your account. - - A successful response will include a 202 response code and no content. Use the - status endpoint to check on the success or failure of the destruction of the - individual resources. - - Args: - floating_ips: An array of unique identifiers for the floating IPs to be scheduled for - deletion. - - reserved_ips: An array of unique identifiers for the reserved IPs to be scheduled for - deletion. - - snapshots: An array of unique identifiers for the snapshots to be scheduled for deletion. - - volume_snapshots: An array of unique identifiers for the volume snapshots to be scheduled for - deletion. - - volumes: An array of unique identifiers for the volumes to be scheduled for deletion. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/selective" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/selective", - body=maybe_transform( - { - "floating_ips": floating_ips, - "reserved_ips": reserved_ips, - "snapshots": snapshots, - "volume_snapshots": volume_snapshots, - "volumes": volumes, - }, - destroy_with_associated_resource_delete_selective_params.DestroyWithAssociatedResourceDeleteSelectiveParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - def retry( - self, - droplet_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - If the status of a request to destroy a Droplet with its associated resources - reported any errors, it can be retried by sending a POST request to the - `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/retry` endpoint. - - Only one destroy can be active at a time per Droplet. If a retry is issued while - another destroy is in progress for the Droplet a 409 status code will be - returned. A successful response will include a 202 response code and no content. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._post( - f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/retry" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/retry", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class AsyncDestroyWithAssociatedResourcesResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncDestroyWithAssociatedResourcesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncDestroyWithAssociatedResourcesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse(self) - - async def list( - self, - droplet_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DestroyWithAssociatedResourceListResponse: - """ - To list the associated billable resources that can be destroyed along with a - Droplet, send a GET request to the - `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources` endpoint. - - This endpoint will only return resources that you are authorized to see. For - example, to see associated Reserved IPs, include the `reserved_ip:read` scope. - - The response will be a JSON object containing `snapshots`, `volumes`, and - `volume_snapshots` keys. Each will be set to an array of objects containing - information about the associated resources. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - f"/v2/droplets/{droplet_id}/destroy_with_associated_resources" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DestroyWithAssociatedResourceListResponse, - ) - - async def check_status( - self, - droplet_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DestroyWithAssociatedResourceCheckStatusResponse: - """ - To check on the status of a request to destroy a Droplet with its associated - resources, send a GET request to the - `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/status` endpoint. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/status" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/status", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DestroyWithAssociatedResourceCheckStatusResponse, - ) - - async def delete_dangerous( - self, - droplet_id: int, - *, - x_dangerous: bool, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To destroy a Droplet along with all of its associated resources, send a DELETE - request to the - `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/dangerous` endpoint. - The headers of this request must include an `X-Dangerous` key set to `true`. To - preview which resources will be destroyed, first query the Droplet's associated - resources. This operation _can not_ be reverse and should be used with caution. - - A successful response will include a 202 response code and no content. Use the - status endpoint to check on the success or failure of the destruction of the - individual resources. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - extra_headers.update({"X-Dangerous": ("true" if x_dangerous else "false")}) - return await self._delete( - f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/dangerous" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/dangerous", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - async def delete_selective( - self, - droplet_id: int, - *, - floating_ips: List[str] | NotGiven = NOT_GIVEN, - reserved_ips: List[str] | NotGiven = NOT_GIVEN, - snapshots: List[str] | NotGiven = NOT_GIVEN, - volume_snapshots: List[str] | NotGiven = NOT_GIVEN, - volumes: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To destroy a Droplet along with a sub-set of its associated resources, send a - DELETE request to the - `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/selective` endpoint. - The JSON body of the request should include `reserved_ips`, `snapshots`, - `volumes`, or `volume_snapshots` keys each set to an array of IDs for the - associated resources to be destroyed. The IDs can be found by querying the - Droplet's associated resources. 
Any associated resource not included in the - request will remain and continue to accrue changes on your account. - - A successful response will include a 202 response code and no content. Use the - status endpoint to check on the success or failure of the destruction of the - individual resources. - - Args: - floating_ips: An array of unique identifiers for the floating IPs to be scheduled for - deletion. - - reserved_ips: An array of unique identifiers for the reserved IPs to be scheduled for - deletion. - - snapshots: An array of unique identifiers for the snapshots to be scheduled for deletion. - - volume_snapshots: An array of unique identifiers for the volume snapshots to be scheduled for - deletion. - - volumes: An array of unique identifiers for the volumes to be scheduled for deletion. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/selective" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/selective", - body=await async_maybe_transform( - { - "floating_ips": floating_ips, - "reserved_ips": reserved_ips, - "snapshots": snapshots, - "volume_snapshots": volume_snapshots, - "volumes": volumes, - }, - destroy_with_associated_resource_delete_selective_params.DestroyWithAssociatedResourceDeleteSelectiveParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - async def retry( - self, - droplet_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't 
available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - If the status of a request to destroy a Droplet with its associated resources - reported any errors, it can be retried by sending a POST request to the - `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/retry` endpoint. - - Only one destroy can be active at a time per Droplet. If a retry is issued while - another destroy is in progress for the Droplet a 409 status code will be - returned. A successful response will include a 202 response code and no content. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._post( - f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/retry" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/retry", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class DestroyWithAssociatedResourcesResourceWithRawResponse: - def __init__(self, destroy_with_associated_resources: DestroyWithAssociatedResourcesResource) -> None: - self._destroy_with_associated_resources = destroy_with_associated_resources - - self.list = to_raw_response_wrapper( - destroy_with_associated_resources.list, - ) - self.check_status = to_raw_response_wrapper( - destroy_with_associated_resources.check_status, - ) - self.delete_dangerous = 
to_raw_response_wrapper( - destroy_with_associated_resources.delete_dangerous, - ) - self.delete_selective = to_raw_response_wrapper( - destroy_with_associated_resources.delete_selective, - ) - self.retry = to_raw_response_wrapper( - destroy_with_associated_resources.retry, - ) - - -class AsyncDestroyWithAssociatedResourcesResourceWithRawResponse: - def __init__(self, destroy_with_associated_resources: AsyncDestroyWithAssociatedResourcesResource) -> None: - self._destroy_with_associated_resources = destroy_with_associated_resources - - self.list = async_to_raw_response_wrapper( - destroy_with_associated_resources.list, - ) - self.check_status = async_to_raw_response_wrapper( - destroy_with_associated_resources.check_status, - ) - self.delete_dangerous = async_to_raw_response_wrapper( - destroy_with_associated_resources.delete_dangerous, - ) - self.delete_selective = async_to_raw_response_wrapper( - destroy_with_associated_resources.delete_selective, - ) - self.retry = async_to_raw_response_wrapper( - destroy_with_associated_resources.retry, - ) - - -class DestroyWithAssociatedResourcesResourceWithStreamingResponse: - def __init__(self, destroy_with_associated_resources: DestroyWithAssociatedResourcesResource) -> None: - self._destroy_with_associated_resources = destroy_with_associated_resources - - self.list = to_streamed_response_wrapper( - destroy_with_associated_resources.list, - ) - self.check_status = to_streamed_response_wrapper( - destroy_with_associated_resources.check_status, - ) - self.delete_dangerous = to_streamed_response_wrapper( - destroy_with_associated_resources.delete_dangerous, - ) - self.delete_selective = to_streamed_response_wrapper( - destroy_with_associated_resources.delete_selective, - ) - self.retry = to_streamed_response_wrapper( - destroy_with_associated_resources.retry, - ) - - -class AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse: - def __init__(self, destroy_with_associated_resources: 
AsyncDestroyWithAssociatedResourcesResource) -> None: - self._destroy_with_associated_resources = destroy_with_associated_resources - - self.list = async_to_streamed_response_wrapper( - destroy_with_associated_resources.list, - ) - self.check_status = async_to_streamed_response_wrapper( - destroy_with_associated_resources.check_status, - ) - self.delete_dangerous = async_to_streamed_response_wrapper( - destroy_with_associated_resources.delete_dangerous, - ) - self.delete_selective = async_to_streamed_response_wrapper( - destroy_with_associated_resources.delete_selective, - ) - self.retry = async_to_streamed_response_wrapper( - destroy_with_associated_resources.retry, - ) diff --git a/src/gradientai/resources/droplets/droplets.py b/src/gradientai/resources/droplets/droplets.py deleted file mode 100644 index fbe2aba5..00000000 --- a/src/gradientai/resources/droplets/droplets.py +++ /dev/null @@ -1,1748 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Any, List, Union, Optional, cast -from typing_extensions import Literal, overload - -import httpx - -from ...types import ( - droplet_list_params, - droplet_create_params, - droplet_list_kernels_params, - droplet_delete_by_tag_params, - droplet_list_firewalls_params, - droplet_list_snapshots_params, -) -from .actions import ( - ActionsResource, - AsyncActionsResource, - ActionsResourceWithRawResponse, - AsyncActionsResourceWithRawResponse, - ActionsResourceWithStreamingResponse, - AsyncActionsResourceWithStreamingResponse, -) -from .backups import ( - BackupsResource, - AsyncBackupsResource, - BackupsResourceWithRawResponse, - AsyncBackupsResourceWithRawResponse, - BackupsResourceWithStreamingResponse, - AsyncBackupsResourceWithStreamingResponse, -) -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from ..._utils import required_args, maybe_transform, async_maybe_transform -from ..._compat import cached_property -from .autoscale import ( - AutoscaleResource, - AsyncAutoscaleResource, - AutoscaleResourceWithRawResponse, - AsyncAutoscaleResourceWithRawResponse, - AutoscaleResourceWithStreamingResponse, - AsyncAutoscaleResourceWithStreamingResponse, -) -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.droplet_list_response import DropletListResponse -from ...types.droplet_create_response import DropletCreateResponse -from ...types.droplet_retrieve_response import DropletRetrieveResponse -from .destroy_with_associated_resources import ( - DestroyWithAssociatedResourcesResource, - AsyncDestroyWithAssociatedResourcesResource, - DestroyWithAssociatedResourcesResourceWithRawResponse, - AsyncDestroyWithAssociatedResourcesResourceWithRawResponse, - 
DestroyWithAssociatedResourcesResourceWithStreamingResponse, - AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse, -) -from ...types.droplet_backup_policy_param import DropletBackupPolicyParam -from ...types.droplet_list_kernels_response import DropletListKernelsResponse -from ...types.droplet_list_firewalls_response import DropletListFirewallsResponse -from ...types.droplet_list_neighbors_response import DropletListNeighborsResponse -from ...types.droplet_list_snapshots_response import DropletListSnapshotsResponse - -__all__ = ["DropletsResource", "AsyncDropletsResource"] - - -class DropletsResource(SyncAPIResource): - @cached_property - def backups(self) -> BackupsResource: - return BackupsResource(self._client) - - @cached_property - def actions(self) -> ActionsResource: - return ActionsResource(self._client) - - @cached_property - def destroy_with_associated_resources(self) -> DestroyWithAssociatedResourcesResource: - return DestroyWithAssociatedResourcesResource(self._client) - - @cached_property - def autoscale(self) -> AutoscaleResource: - return AutoscaleResource(self._client) - - @cached_property - def with_raw_response(self) -> DropletsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return DropletsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> DropletsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return DropletsResourceWithStreamingResponse(self) - - @overload - def create( - self, - *, - image: Union[str, int], - name: str, - size: str, - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - backups: bool | NotGiven = NOT_GIVEN, - ipv6: bool | NotGiven = NOT_GIVEN, - monitoring: bool | NotGiven = NOT_GIVEN, - private_networking: bool | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - user_data: str | NotGiven = NOT_GIVEN, - volumes: List[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - with_droplet_agent: bool | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletCreateResponse: - """ - To create a new Droplet, send a POST request to `/v2/droplets` setting the - required attributes. - - A Droplet will be created using the provided information. The response body will - contain a JSON object with a key called `droplet`. The value will be an object - containing the standard attributes for your new Droplet. The response code, 202 - Accepted, does not indicate the success or failure of the operation, just that - the request has been accepted for processing. The `actions` returned as part of - the response's `links` object can be used to check the status of the Droplet - create event. - - ### Create Multiple Droplets - - Creating multiple Droplets is very similar to creating a single Droplet. 
Instead - of sending `name` as a string, send `names` as an array of strings. A Droplet - will be created for each name you send using the associated information. Up to - ten Droplets may be created this way at a time. - - Rather than returning a single Droplet, the response body will contain a JSON - array with a key called `droplets`. This will be set to an array of JSON - objects, each of which will contain the standard Droplet attributes. The - response code, 202 Accepted, does not indicate the success or failure of any - operation, just that the request has been accepted for processing. The array of - `actions` returned as part of the response's `links` object can be used to check - the status of each individual Droplet create event. - - Args: - image: The image ID of a public or private image or the slug identifier for a public - image. This image will be the base image for your Droplet. Requires `image:read` - scope. - - name: The human-readable string you wish to use when displaying the Droplet name. The - name, if set to a domain name managed in the DigitalOcean DNS management system, - will configure a PTR record for the Droplet. The name set during creation will - also determine the hostname for the Droplet in its internal configuration. - - size: The slug identifier for the size that you wish to select for this Droplet. - - backup_policy: An object specifying the backup policy for the Droplet. If omitted and `backups` - is `true`, the backup plan will default to daily. - - backups: A boolean indicating whether automated backups should be enabled for the - Droplet. - - ipv6: A boolean indicating whether to enable IPv6 on the Droplet. - - monitoring: A boolean indicating whether to install the DigitalOcean agent for monitoring. - - private_networking: This parameter has been deprecated. Use `vpc_uuid` instead to specify a VPC - network for the Droplet. If no `vpc_uuid` is provided, the Droplet will be - placed in your account's default VPC for the region. 
- - region: The slug identifier for the region that you wish to deploy the Droplet in. If - the specific datacenter is not not important, a slug prefix (e.g. `nyc`) can be - used to deploy the Droplet in any of the that region's locations (`nyc1`, - `nyc2`, or `nyc3`). If the region is omitted from the create request completely, - the Droplet may deploy in any region. - - ssh_keys: An array containing the IDs or fingerprints of the SSH keys that you wish to - embed in the Droplet's root account upon creation. You must add the keys to your - team before they can be embedded on a Droplet. Requires `ssh_key:read` scope. - - tags: A flat array of tag names as strings to apply to the Droplet after it is - created. Tag names can either be existing or new tags. Requires `tag:create` - scope. - - user_data: A string containing 'user data' which may be used to configure the Droplet on - first boot, often a 'cloud-config' file or Bash script. It must be plain text - and may not exceed 64 KiB in size. - - volumes: An array of IDs for block storage volumes that will be attached to the Droplet - once created. The volumes must not already be attached to an existing Droplet. - Requires `block_storage:read` scpoe. - - vpc_uuid: A string specifying the UUID of the VPC to which the Droplet will be assigned. - If excluded, the Droplet will be assigned to your account's default VPC for the - region. Requires `vpc:read` scope. - - with_droplet_agent: A boolean indicating whether to install the DigitalOcean agent used for - providing access to the Droplet web console in the control panel. By default, - the agent is installed on new Droplets but installation errors (i.e. OS not - supported) are ignored. To prevent it from being installed, set to `false`. To - make installation errors fatal, explicitly set it to `true`. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - def create( - self, - *, - image: Union[str, int], - names: List[str], - size: str, - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - backups: bool | NotGiven = NOT_GIVEN, - ipv6: bool | NotGiven = NOT_GIVEN, - monitoring: bool | NotGiven = NOT_GIVEN, - private_networking: bool | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - user_data: str | NotGiven = NOT_GIVEN, - volumes: List[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - with_droplet_agent: bool | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletCreateResponse: - """ - To create a new Droplet, send a POST request to `/v2/droplets` setting the - required attributes. - - A Droplet will be created using the provided information. The response body will - contain a JSON object with a key called `droplet`. The value will be an object - containing the standard attributes for your new Droplet. The response code, 202 - Accepted, does not indicate the success or failure of the operation, just that - the request has been accepted for processing. The `actions` returned as part of - the response's `links` object can be used to check the status of the Droplet - create event. 
- - ### Create Multiple Droplets - - Creating multiple Droplets is very similar to creating a single Droplet. Instead - of sending `name` as a string, send `names` as an array of strings. A Droplet - will be created for each name you send using the associated information. Up to - ten Droplets may be created this way at a time. - - Rather than returning a single Droplet, the response body will contain a JSON - array with a key called `droplets`. This will be set to an array of JSON - objects, each of which will contain the standard Droplet attributes. The - response code, 202 Accepted, does not indicate the success or failure of any - operation, just that the request has been accepted for processing. The array of - `actions` returned as part of the response's `links` object can be used to check - the status of each individual Droplet create event. - - Args: - image: The image ID of a public or private image or the slug identifier for a public - image. This image will be the base image for your Droplet. Requires `image:read` - scope. - - names: An array of human human-readable strings you wish to use when displaying the - Droplet name. Each name, if set to a domain name managed in the DigitalOcean DNS - management system, will configure a PTR record for the Droplet. Each name set - during creation will also determine the hostname for the Droplet in its internal - configuration. - - size: The slug identifier for the size that you wish to select for this Droplet. - - backup_policy: An object specifying the backup policy for the Droplet. If omitted and `backups` - is `true`, the backup plan will default to daily. - - backups: A boolean indicating whether automated backups should be enabled for the - Droplet. - - ipv6: A boolean indicating whether to enable IPv6 on the Droplet. - - monitoring: A boolean indicating whether to install the DigitalOcean agent for monitoring. - - private_networking: This parameter has been deprecated. 
Use `vpc_uuid` instead to specify a VPC - network for the Droplet. If no `vpc_uuid` is provided, the Droplet will be - placed in your account's default VPC for the region. - - region: The slug identifier for the region that you wish to deploy the Droplet in. If - the specific datacenter is not not important, a slug prefix (e.g. `nyc`) can be - used to deploy the Droplet in any of the that region's locations (`nyc1`, - `nyc2`, or `nyc3`). If the region is omitted from the create request completely, - the Droplet may deploy in any region. - - ssh_keys: An array containing the IDs or fingerprints of the SSH keys that you wish to - embed in the Droplet's root account upon creation. You must add the keys to your - team before they can be embedded on a Droplet. Requires `ssh_key:read` scope. - - tags: A flat array of tag names as strings to apply to the Droplet after it is - created. Tag names can either be existing or new tags. Requires `tag:create` - scope. - - user_data: A string containing 'user data' which may be used to configure the Droplet on - first boot, often a 'cloud-config' file or Bash script. It must be plain text - and may not exceed 64 KiB in size. - - volumes: An array of IDs for block storage volumes that will be attached to the Droplet - once created. The volumes must not already be attached to an existing Droplet. - Requires `block_storage:read` scpoe. - - vpc_uuid: A string specifying the UUID of the VPC to which the Droplet will be assigned. - If excluded, the Droplet will be assigned to your account's default VPC for the - region. Requires `vpc:read` scope. - - with_droplet_agent: A boolean indicating whether to install the DigitalOcean agent used for - providing access to the Droplet web console in the control panel. By default, - the agent is installed on new Droplets but installation errors (i.e. OS not - supported) are ignored. To prevent it from being installed, set to `false`. To - make installation errors fatal, explicitly set it to `true`. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["image", "name", "size"], ["image", "names", "size"]) - def create( - self, - *, - image: Union[str, int], - name: str | NotGiven = NOT_GIVEN, - size: str, - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - backups: bool | NotGiven = NOT_GIVEN, - ipv6: bool | NotGiven = NOT_GIVEN, - monitoring: bool | NotGiven = NOT_GIVEN, - private_networking: bool | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - user_data: str | NotGiven = NOT_GIVEN, - volumes: List[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - with_droplet_agent: bool | NotGiven = NOT_GIVEN, - names: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletCreateResponse: - return cast( - DropletCreateResponse, - self._post( - "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets", - body=maybe_transform( - { - "image": image, - "name": name, - "size": size, - "backup_policy": backup_policy, - "backups": backups, - "ipv6": ipv6, - "monitoring": monitoring, - "private_networking": private_networking, - "region": region, - "ssh_keys": ssh_keys, - "tags": tags, - "user_data": user_data, - "volumes": volumes, - "vpc_uuid": vpc_uuid, - "with_droplet_agent": with_droplet_agent, - "names": names, - }, - droplet_create_params.DropletCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=cast( - Any, DropletCreateResponse - ), # Union types cannot be passed in as arguments in the type system - ), - ) - - def retrieve( - self, - droplet_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletRetrieveResponse: - """ - To show information about an individual Droplet, send a GET request to - `/v2/droplets/$DROPLET_ID`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - f"/v2/droplets/{droplet_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DropletRetrieveResponse, - ) - - def list( - self, - *, - name: str | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - tag_name: str | NotGiven = NOT_GIVEN, - type: Literal["droplets", "gpus"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletListResponse: - """ - To list all Droplets in your account, send a GET request to `/v2/droplets`. - - The response body will be a JSON object with a key of `droplets`. This will be - set to an array containing objects each representing a Droplet. These will - contain the standard Droplet attributes. - - ### Filtering Results by Tag - - It's possible to request filtered results by including certain query parameters. - To only list Droplets assigned to a specific tag, include the `tag_name` query - parameter set to the name of the tag in your GET request. For example, - `/v2/droplets?tag_name=$TAG_NAME`. - - ### GPU Droplets - - By default, only non-GPU Droplets are returned. To list only GPU Droplets, set - the `type` query parameter to `gpus`. 
For example, `/v2/droplets?type=gpus`. - - Args: - name: Used to filter list response by Droplet name returning only exact matches. It is - case-insensitive and can not be combined with `tag_name`. - - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or - `type`. Requires `tag:read` scope. - - type: When `type` is set to `gpus`, only GPU Droplets will be returned. By default, - only non-GPU Droplets are returned. Can not be combined with `tag_name`. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "name": name, - "page": page, - "per_page": per_page, - "tag_name": tag_name, - "type": type, - }, - droplet_list_params.DropletListParams, - ), - ), - cast_to=DropletListResponse, - ) - - def delete( - self, - droplet_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete a Droplet, send a DELETE request to `/v2/droplets/$DROPLET_ID`. - - A successful request will receive a 204 status code with no body in response. 
- This indicates that the request was processed successfully. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/v2/droplets/{droplet_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - def delete_by_tag( - self, - *, - tag_name: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete **all** Droplets assigned to a specific tag, include the `tag_name` - query parameter set to the name of the tag in your DELETE request. For example, - `/v2/droplets?tag_name=$TAG_NAME`. - - This endpoint requires `tag:read` scope. - - A successful request will receive a 204 status code with no body in response. - This indicates that the request was processed successfully. - - Args: - tag_name: Specifies Droplets to be deleted by tag. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform({"tag_name": tag_name}, droplet_delete_by_tag_params.DropletDeleteByTagParams), - ), - cast_to=NoneType, - ) - - def list_firewalls( - self, - droplet_id: int, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletListFirewallsResponse: - """ - To retrieve a list of all firewalls available to a Droplet, send a GET request - to `/v2/droplets/$DROPLET_ID/firewalls` - - The response will be a JSON object that has a key called `firewalls`. This will - be set to an array of `firewall` objects, each of which contain the standard - `firewall` attributes. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - f"/v2/droplets/{droplet_id}/firewalls" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/firewalls", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - droplet_list_firewalls_params.DropletListFirewallsParams, - ), - ), - cast_to=DropletListFirewallsResponse, - ) - - def list_kernels( - self, - droplet_id: int, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletListKernelsResponse: - """ - To retrieve a list of all kernels available to a Droplet, send a GET request to - `/v2/droplets/$DROPLET_ID/kernels` - - The response will be a JSON object that has a key called `kernels`. This will be - set to an array of `kernel` objects, each of which contain the standard `kernel` - attributes. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - f"/v2/droplets/{droplet_id}/kernels" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/kernels", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - droplet_list_kernels_params.DropletListKernelsParams, - ), - ), - cast_to=DropletListKernelsResponse, - ) - - def list_neighbors( - self, - droplet_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletListNeighborsResponse: - """To retrieve a list of any "neighbors" (i.e. - - Droplets that are co-located on the - same physical hardware) for a specific Droplet, send a GET request to - `/v2/droplets/$DROPLET_ID/neighbors`. - - The results will be returned as a JSON object with a key of `droplets`. This - will be set to an array containing objects representing any other Droplets that - share the same physical hardware. An empty array indicates that the Droplet is - not co-located any other Droplets associated with your account. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - f"/v2/droplets/{droplet_id}/neighbors" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/neighbors", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DropletListNeighborsResponse, - ) - - def list_snapshots( - self, - droplet_id: int, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletListSnapshotsResponse: - """ - To retrieve the snapshots that have been created from a Droplet, send a GET - request to `/v2/droplets/$DROPLET_ID/snapshots`. - - You will get back a JSON object that has a `snapshots` key. This will be set to - an array of snapshot objects, each of which contain the standard Droplet - snapshot attributes. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - f"/v2/droplets/{droplet_id}/snapshots" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/snapshots", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - droplet_list_snapshots_params.DropletListSnapshotsParams, - ), - ), - cast_to=DropletListSnapshotsResponse, - ) - - -class AsyncDropletsResource(AsyncAPIResource): - @cached_property - def backups(self) -> AsyncBackupsResource: - return AsyncBackupsResource(self._client) - - @cached_property - def actions(self) -> AsyncActionsResource: - return AsyncActionsResource(self._client) - - @cached_property - def destroy_with_associated_resources(self) -> AsyncDestroyWithAssociatedResourcesResource: - return AsyncDestroyWithAssociatedResourcesResource(self._client) - - @cached_property - def autoscale(self) -> AsyncAutoscaleResource: - return AsyncAutoscaleResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncDropletsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncDropletsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncDropletsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncDropletsResourceWithStreamingResponse(self) - - @overload - async def create( - self, - *, - image: Union[str, int], - name: str, - size: str, - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - backups: bool | NotGiven = NOT_GIVEN, - ipv6: bool | NotGiven = NOT_GIVEN, - monitoring: bool | NotGiven = NOT_GIVEN, - private_networking: bool | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - user_data: str | NotGiven = NOT_GIVEN, - volumes: List[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - with_droplet_agent: bool | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletCreateResponse: - """ - To create a new Droplet, send a POST request to `/v2/droplets` setting the - required attributes. - - A Droplet will be created using the provided information. The response body will - contain a JSON object with a key called `droplet`. The value will be an object - containing the standard attributes for your new Droplet. The response code, 202 - Accepted, does not indicate the success or failure of the operation, just that - the request has been accepted for processing. The `actions` returned as part of - the response's `links` object can be used to check the status of the Droplet - create event. - - ### Create Multiple Droplets - - Creating multiple Droplets is very similar to creating a single Droplet. 
Instead - of sending `name` as a string, send `names` as an array of strings. A Droplet - will be created for each name you send using the associated information. Up to - ten Droplets may be created this way at a time. - - Rather than returning a single Droplet, the response body will contain a JSON - array with a key called `droplets`. This will be set to an array of JSON - objects, each of which will contain the standard Droplet attributes. The - response code, 202 Accepted, does not indicate the success or failure of any - operation, just that the request has been accepted for processing. The array of - `actions` returned as part of the response's `links` object can be used to check - the status of each individual Droplet create event. - - Args: - image: The image ID of a public or private image or the slug identifier for a public - image. This image will be the base image for your Droplet. Requires `image:read` - scope. - - name: The human-readable string you wish to use when displaying the Droplet name. The - name, if set to a domain name managed in the DigitalOcean DNS management system, - will configure a PTR record for the Droplet. The name set during creation will - also determine the hostname for the Droplet in its internal configuration. - - size: The slug identifier for the size that you wish to select for this Droplet. - - backup_policy: An object specifying the backup policy for the Droplet. If omitted and `backups` - is `true`, the backup plan will default to daily. - - backups: A boolean indicating whether automated backups should be enabled for the - Droplet. - - ipv6: A boolean indicating whether to enable IPv6 on the Droplet. - - monitoring: A boolean indicating whether to install the DigitalOcean agent for monitoring. - - private_networking: This parameter has been deprecated. Use `vpc_uuid` instead to specify a VPC - network for the Droplet. If no `vpc_uuid` is provided, the Droplet will be - placed in your account's default VPC for the region. 
- - region: The slug identifier for the region that you wish to deploy the Droplet in. If - the specific datacenter is not not important, a slug prefix (e.g. `nyc`) can be - used to deploy the Droplet in any of the that region's locations (`nyc1`, - `nyc2`, or `nyc3`). If the region is omitted from the create request completely, - the Droplet may deploy in any region. - - ssh_keys: An array containing the IDs or fingerprints of the SSH keys that you wish to - embed in the Droplet's root account upon creation. You must add the keys to your - team before they can be embedded on a Droplet. Requires `ssh_key:read` scope. - - tags: A flat array of tag names as strings to apply to the Droplet after it is - created. Tag names can either be existing or new tags. Requires `tag:create` - scope. - - user_data: A string containing 'user data' which may be used to configure the Droplet on - first boot, often a 'cloud-config' file or Bash script. It must be plain text - and may not exceed 64 KiB in size. - - volumes: An array of IDs for block storage volumes that will be attached to the Droplet - once created. The volumes must not already be attached to an existing Droplet. - Requires `block_storage:read` scpoe. - - vpc_uuid: A string specifying the UUID of the VPC to which the Droplet will be assigned. - If excluded, the Droplet will be assigned to your account's default VPC for the - region. Requires `vpc:read` scope. - - with_droplet_agent: A boolean indicating whether to install the DigitalOcean agent used for - providing access to the Droplet web console in the control panel. By default, - the agent is installed on new Droplets but installation errors (i.e. OS not - supported) are ignored. To prevent it from being installed, set to `false`. To - make installation errors fatal, explicitly set it to `true`. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - async def create( - self, - *, - image: Union[str, int], - names: List[str], - size: str, - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - backups: bool | NotGiven = NOT_GIVEN, - ipv6: bool | NotGiven = NOT_GIVEN, - monitoring: bool | NotGiven = NOT_GIVEN, - private_networking: bool | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - user_data: str | NotGiven = NOT_GIVEN, - volumes: List[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - with_droplet_agent: bool | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletCreateResponse: - """ - To create a new Droplet, send a POST request to `/v2/droplets` setting the - required attributes. - - A Droplet will be created using the provided information. The response body will - contain a JSON object with a key called `droplet`. The value will be an object - containing the standard attributes for your new Droplet. The response code, 202 - Accepted, does not indicate the success or failure of the operation, just that - the request has been accepted for processing. The `actions` returned as part of - the response's `links` object can be used to check the status of the Droplet - create event. 
- - ### Create Multiple Droplets - - Creating multiple Droplets is very similar to creating a single Droplet. Instead - of sending `name` as a string, send `names` as an array of strings. A Droplet - will be created for each name you send using the associated information. Up to - ten Droplets may be created this way at a time. - - Rather than returning a single Droplet, the response body will contain a JSON - array with a key called `droplets`. This will be set to an array of JSON - objects, each of which will contain the standard Droplet attributes. The - response code, 202 Accepted, does not indicate the success or failure of any - operation, just that the request has been accepted for processing. The array of - `actions` returned as part of the response's `links` object can be used to check - the status of each individual Droplet create event. - - Args: - image: The image ID of a public or private image or the slug identifier for a public - image. This image will be the base image for your Droplet. Requires `image:read` - scope. - - names: An array of human human-readable strings you wish to use when displaying the - Droplet name. Each name, if set to a domain name managed in the DigitalOcean DNS - management system, will configure a PTR record for the Droplet. Each name set - during creation will also determine the hostname for the Droplet in its internal - configuration. - - size: The slug identifier for the size that you wish to select for this Droplet. - - backup_policy: An object specifying the backup policy for the Droplet. If omitted and `backups` - is `true`, the backup plan will default to daily. - - backups: A boolean indicating whether automated backups should be enabled for the - Droplet. - - ipv6: A boolean indicating whether to enable IPv6 on the Droplet. - - monitoring: A boolean indicating whether to install the DigitalOcean agent for monitoring. - - private_networking: This parameter has been deprecated. 
Use `vpc_uuid` instead to specify a VPC - network for the Droplet. If no `vpc_uuid` is provided, the Droplet will be - placed in your account's default VPC for the region. - - region: The slug identifier for the region that you wish to deploy the Droplet in. If - the specific datacenter is not not important, a slug prefix (e.g. `nyc`) can be - used to deploy the Droplet in any of the that region's locations (`nyc1`, - `nyc2`, or `nyc3`). If the region is omitted from the create request completely, - the Droplet may deploy in any region. - - ssh_keys: An array containing the IDs or fingerprints of the SSH keys that you wish to - embed in the Droplet's root account upon creation. You must add the keys to your - team before they can be embedded on a Droplet. Requires `ssh_key:read` scope. - - tags: A flat array of tag names as strings to apply to the Droplet after it is - created. Tag names can either be existing or new tags. Requires `tag:create` - scope. - - user_data: A string containing 'user data' which may be used to configure the Droplet on - first boot, often a 'cloud-config' file or Bash script. It must be plain text - and may not exceed 64 KiB in size. - - volumes: An array of IDs for block storage volumes that will be attached to the Droplet - once created. The volumes must not already be attached to an existing Droplet. - Requires `block_storage:read` scpoe. - - vpc_uuid: A string specifying the UUID of the VPC to which the Droplet will be assigned. - If excluded, the Droplet will be assigned to your account's default VPC for the - region. Requires `vpc:read` scope. - - with_droplet_agent: A boolean indicating whether to install the DigitalOcean agent used for - providing access to the Droplet web console in the control panel. By default, - the agent is installed on new Droplets but installation errors (i.e. OS not - supported) are ignored. To prevent it from being installed, set to `false`. To - make installation errors fatal, explicitly set it to `true`. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["image", "name", "size"], ["image", "names", "size"]) - async def create( - self, - *, - image: Union[str, int], - name: str | NotGiven = NOT_GIVEN, - size: str, - backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, - backups: bool | NotGiven = NOT_GIVEN, - ipv6: bool | NotGiven = NOT_GIVEN, - monitoring: bool | NotGiven = NOT_GIVEN, - private_networking: bool | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - user_data: str | NotGiven = NOT_GIVEN, - volumes: List[str] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - with_droplet_agent: bool | NotGiven = NOT_GIVEN, - names: List[str] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletCreateResponse: - return cast( - DropletCreateResponse, - await self._post( - "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets", - body=await async_maybe_transform( - { - "image": image, - "name": name, - "size": size, - "backup_policy": backup_policy, - "backups": backups, - "ipv6": ipv6, - "monitoring": monitoring, - "private_networking": private_networking, - "region": region, - "ssh_keys": ssh_keys, - "tags": tags, - "user_data": user_data, - "volumes": volumes, - "vpc_uuid": vpc_uuid, - "with_droplet_agent": with_droplet_agent, - "names": names, - }, - droplet_create_params.DropletCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=cast( - Any, DropletCreateResponse - ), # Union types cannot be passed in as arguments in the type system - ), - ) - - async def retrieve( - self, - droplet_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletRetrieveResponse: - """ - To show information about an individual Droplet, send a GET request to - `/v2/droplets/$DROPLET_ID`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - f"/v2/droplets/{droplet_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DropletRetrieveResponse, - ) - - async def list( - self, - *, - name: str | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - tag_name: str | NotGiven = NOT_GIVEN, - type: Literal["droplets", "gpus"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletListResponse: - """ - To list all Droplets in your account, send a GET request to `/v2/droplets`. - - The response body will be a JSON object with a key of `droplets`. This will be - set to an array containing objects each representing a Droplet. These will - contain the standard Droplet attributes. - - ### Filtering Results by Tag - - It's possible to request filtered results by including certain query parameters. - To only list Droplets assigned to a specific tag, include the `tag_name` query - parameter set to the name of the tag in your GET request. For example, - `/v2/droplets?tag_name=$TAG_NAME`. - - ### GPU Droplets - - By default, only non-GPU Droplets are returned. 
To list only GPU Droplets, set - the `type` query parameter to `gpus`. For example, `/v2/droplets?type=gpus`. - - Args: - name: Used to filter list response by Droplet name returning only exact matches. It is - case-insensitive and can not be combined with `tag_name`. - - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or - `type`. Requires `tag:read` scope. - - type: When `type` is set to `gpus`, only GPU Droplets will be returned. By default, - only non-GPU Droplets are returned. Can not be combined with `tag_name`. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "name": name, - "page": page, - "per_page": per_page, - "tag_name": tag_name, - "type": type, - }, - droplet_list_params.DropletListParams, - ), - ), - cast_to=DropletListResponse, - ) - - async def delete( - self, - droplet_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete a Droplet, send a DELETE request to `/v2/droplets/$DROPLET_ID`. 
- - A successful request will receive a 204 status code with no body in response. - This indicates that the request was processed successfully. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/v2/droplets/{droplet_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - async def delete_by_tag( - self, - *, - tag_name: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete **all** Droplets assigned to a specific tag, include the `tag_name` - query parameter set to the name of the tag in your DELETE request. For example, - `/v2/droplets?tag_name=$TAG_NAME`. - - This endpoint requires `tag:read` scope. - - A successful request will receive a 204 status code with no body in response. - This indicates that the request was processed successfully. - - Args: - tag_name: Specifies Droplets to be deleted by tag. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - {"tag_name": tag_name}, droplet_delete_by_tag_params.DropletDeleteByTagParams - ), - ), - cast_to=NoneType, - ) - - async def list_firewalls( - self, - droplet_id: int, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletListFirewallsResponse: - """ - To retrieve a list of all firewalls available to a Droplet, send a GET request - to `/v2/droplets/$DROPLET_ID/firewalls` - - The response will be a JSON object that has a key called `firewalls`. This will - be set to an array of `firewall` objects, each of which contain the standard - `firewall` attributes. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - f"/v2/droplets/{droplet_id}/firewalls" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/firewalls", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - droplet_list_firewalls_params.DropletListFirewallsParams, - ), - ), - cast_to=DropletListFirewallsResponse, - ) - - async def list_kernels( - self, - droplet_id: int, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletListKernelsResponse: - """ - To retrieve a list of all kernels available to a Droplet, send a GET request to - `/v2/droplets/$DROPLET_ID/kernels` - - The response will be a JSON object that has a key called `kernels`. This will be - set to an array of `kernel` objects, each of which contain the standard `kernel` - attributes. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - f"/v2/droplets/{droplet_id}/kernels" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/kernels", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - droplet_list_kernels_params.DropletListKernelsParams, - ), - ), - cast_to=DropletListKernelsResponse, - ) - - async def list_neighbors( - self, - droplet_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletListNeighborsResponse: - """To retrieve a list of any "neighbors" (i.e. - - Droplets that are co-located on the - same physical hardware) for a specific Droplet, send a GET request to - `/v2/droplets/$DROPLET_ID/neighbors`. - - The results will be returned as a JSON object with a key of `droplets`. This - will be set to an array containing objects representing any other Droplets that - share the same physical hardware. An empty array indicates that the Droplet is - not co-located any other Droplets associated with your account. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - f"/v2/droplets/{droplet_id}/neighbors" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/neighbors", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=DropletListNeighborsResponse, - ) - - async def list_snapshots( - self, - droplet_id: int, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> DropletListSnapshotsResponse: - """ - To retrieve the snapshots that have been created from a Droplet, send a GET - request to `/v2/droplets/$DROPLET_ID/snapshots`. - - You will get back a JSON object that has a `snapshots` key. This will be set to - an array of snapshot objects, each of which contain the standard Droplet - snapshot attributes. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - f"/v2/droplets/{droplet_id}/snapshots" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/snapshots", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - droplet_list_snapshots_params.DropletListSnapshotsParams, - ), - ), - cast_to=DropletListSnapshotsResponse, - ) - - -class DropletsResourceWithRawResponse: - def __init__(self, droplets: DropletsResource) -> None: - self._droplets = droplets - - self.create = to_raw_response_wrapper( - droplets.create, - ) - self.retrieve = to_raw_response_wrapper( - droplets.retrieve, - ) - self.list = to_raw_response_wrapper( - droplets.list, - ) - self.delete = to_raw_response_wrapper( - droplets.delete, - ) - self.delete_by_tag = to_raw_response_wrapper( - droplets.delete_by_tag, - ) - self.list_firewalls = to_raw_response_wrapper( - droplets.list_firewalls, - ) - self.list_kernels = to_raw_response_wrapper( - droplets.list_kernels, - ) - self.list_neighbors = to_raw_response_wrapper( - droplets.list_neighbors, - ) - self.list_snapshots = to_raw_response_wrapper( - droplets.list_snapshots, - ) - - @cached_property - def backups(self) -> BackupsResourceWithRawResponse: - return BackupsResourceWithRawResponse(self._droplets.backups) - - @cached_property - def actions(self) -> ActionsResourceWithRawResponse: - return ActionsResourceWithRawResponse(self._droplets.actions) - - @cached_property - def destroy_with_associated_resources(self) -> 
DestroyWithAssociatedResourcesResourceWithRawResponse: - return DestroyWithAssociatedResourcesResourceWithRawResponse(self._droplets.destroy_with_associated_resources) - - @cached_property - def autoscale(self) -> AutoscaleResourceWithRawResponse: - return AutoscaleResourceWithRawResponse(self._droplets.autoscale) - - -class AsyncDropletsResourceWithRawResponse: - def __init__(self, droplets: AsyncDropletsResource) -> None: - self._droplets = droplets - - self.create = async_to_raw_response_wrapper( - droplets.create, - ) - self.retrieve = async_to_raw_response_wrapper( - droplets.retrieve, - ) - self.list = async_to_raw_response_wrapper( - droplets.list, - ) - self.delete = async_to_raw_response_wrapper( - droplets.delete, - ) - self.delete_by_tag = async_to_raw_response_wrapper( - droplets.delete_by_tag, - ) - self.list_firewalls = async_to_raw_response_wrapper( - droplets.list_firewalls, - ) - self.list_kernels = async_to_raw_response_wrapper( - droplets.list_kernels, - ) - self.list_neighbors = async_to_raw_response_wrapper( - droplets.list_neighbors, - ) - self.list_snapshots = async_to_raw_response_wrapper( - droplets.list_snapshots, - ) - - @cached_property - def backups(self) -> AsyncBackupsResourceWithRawResponse: - return AsyncBackupsResourceWithRawResponse(self._droplets.backups) - - @cached_property - def actions(self) -> AsyncActionsResourceWithRawResponse: - return AsyncActionsResourceWithRawResponse(self._droplets.actions) - - @cached_property - def destroy_with_associated_resources(self) -> AsyncDestroyWithAssociatedResourcesResourceWithRawResponse: - return AsyncDestroyWithAssociatedResourcesResourceWithRawResponse( - self._droplets.destroy_with_associated_resources - ) - - @cached_property - def autoscale(self) -> AsyncAutoscaleResourceWithRawResponse: - return AsyncAutoscaleResourceWithRawResponse(self._droplets.autoscale) - - -class DropletsResourceWithStreamingResponse: - def __init__(self, droplets: DropletsResource) -> None: - self._droplets 
= droplets - - self.create = to_streamed_response_wrapper( - droplets.create, - ) - self.retrieve = to_streamed_response_wrapper( - droplets.retrieve, - ) - self.list = to_streamed_response_wrapper( - droplets.list, - ) - self.delete = to_streamed_response_wrapper( - droplets.delete, - ) - self.delete_by_tag = to_streamed_response_wrapper( - droplets.delete_by_tag, - ) - self.list_firewalls = to_streamed_response_wrapper( - droplets.list_firewalls, - ) - self.list_kernels = to_streamed_response_wrapper( - droplets.list_kernels, - ) - self.list_neighbors = to_streamed_response_wrapper( - droplets.list_neighbors, - ) - self.list_snapshots = to_streamed_response_wrapper( - droplets.list_snapshots, - ) - - @cached_property - def backups(self) -> BackupsResourceWithStreamingResponse: - return BackupsResourceWithStreamingResponse(self._droplets.backups) - - @cached_property - def actions(self) -> ActionsResourceWithStreamingResponse: - return ActionsResourceWithStreamingResponse(self._droplets.actions) - - @cached_property - def destroy_with_associated_resources(self) -> DestroyWithAssociatedResourcesResourceWithStreamingResponse: - return DestroyWithAssociatedResourcesResourceWithStreamingResponse( - self._droplets.destroy_with_associated_resources - ) - - @cached_property - def autoscale(self) -> AutoscaleResourceWithStreamingResponse: - return AutoscaleResourceWithStreamingResponse(self._droplets.autoscale) - - -class AsyncDropletsResourceWithStreamingResponse: - def __init__(self, droplets: AsyncDropletsResource) -> None: - self._droplets = droplets - - self.create = async_to_streamed_response_wrapper( - droplets.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - droplets.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - droplets.list, - ) - self.delete = async_to_streamed_response_wrapper( - droplets.delete, - ) - self.delete_by_tag = async_to_streamed_response_wrapper( - droplets.delete_by_tag, - ) - self.list_firewalls = 
async_to_streamed_response_wrapper( - droplets.list_firewalls, - ) - self.list_kernels = async_to_streamed_response_wrapper( - droplets.list_kernels, - ) - self.list_neighbors = async_to_streamed_response_wrapper( - droplets.list_neighbors, - ) - self.list_snapshots = async_to_streamed_response_wrapper( - droplets.list_snapshots, - ) - - @cached_property - def backups(self) -> AsyncBackupsResourceWithStreamingResponse: - return AsyncBackupsResourceWithStreamingResponse(self._droplets.backups) - - @cached_property - def actions(self) -> AsyncActionsResourceWithStreamingResponse: - return AsyncActionsResourceWithStreamingResponse(self._droplets.actions) - - @cached_property - def destroy_with_associated_resources(self) -> AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse: - return AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse( - self._droplets.destroy_with_associated_resources - ) - - @cached_property - def autoscale(self) -> AsyncAutoscaleResourceWithStreamingResponse: - return AsyncAutoscaleResourceWithStreamingResponse(self._droplets.autoscale) diff --git a/src/gradientai/resources/firewalls/__init__.py b/src/gradientai/resources/firewalls/__init__.py deleted file mode 100644 index e9cb832f..00000000 --- a/src/gradientai/resources/firewalls/__init__.py +++ /dev/null @@ -1,61 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .tags import ( - TagsResource, - AsyncTagsResource, - TagsResourceWithRawResponse, - AsyncTagsResourceWithRawResponse, - TagsResourceWithStreamingResponse, - AsyncTagsResourceWithStreamingResponse, -) -from .rules import ( - RulesResource, - AsyncRulesResource, - RulesResourceWithRawResponse, - AsyncRulesResourceWithRawResponse, - RulesResourceWithStreamingResponse, - AsyncRulesResourceWithStreamingResponse, -) -from .droplets import ( - DropletsResource, - AsyncDropletsResource, - DropletsResourceWithRawResponse, - AsyncDropletsResourceWithRawResponse, - DropletsResourceWithStreamingResponse, - AsyncDropletsResourceWithStreamingResponse, -) -from .firewalls import ( - FirewallsResource, - AsyncFirewallsResource, - FirewallsResourceWithRawResponse, - AsyncFirewallsResourceWithRawResponse, - FirewallsResourceWithStreamingResponse, - AsyncFirewallsResourceWithStreamingResponse, -) - -__all__ = [ - "DropletsResource", - "AsyncDropletsResource", - "DropletsResourceWithRawResponse", - "AsyncDropletsResourceWithRawResponse", - "DropletsResourceWithStreamingResponse", - "AsyncDropletsResourceWithStreamingResponse", - "TagsResource", - "AsyncTagsResource", - "TagsResourceWithRawResponse", - "AsyncTagsResourceWithRawResponse", - "TagsResourceWithStreamingResponse", - "AsyncTagsResourceWithStreamingResponse", - "RulesResource", - "AsyncRulesResource", - "RulesResourceWithRawResponse", - "AsyncRulesResourceWithRawResponse", - "RulesResourceWithStreamingResponse", - "AsyncRulesResourceWithStreamingResponse", - "FirewallsResource", - "AsyncFirewallsResource", - "FirewallsResourceWithRawResponse", - "AsyncFirewallsResourceWithRawResponse", - "FirewallsResourceWithStreamingResponse", - "AsyncFirewallsResourceWithStreamingResponse", -] diff --git a/src/gradientai/resources/firewalls/droplets.py b/src/gradientai/resources/firewalls/droplets.py deleted file mode 100644 index 435b28e1..00000000 --- a/src/gradientai/resources/firewalls/droplets.py +++ /dev/null @@ -1,296 +0,0 
@@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Iterable - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.firewalls import droplet_add_params, droplet_remove_params - -__all__ = ["DropletsResource", "AsyncDropletsResource"] - - -class DropletsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> DropletsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return DropletsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> DropletsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return DropletsResourceWithStreamingResponse(self) - - def add( - self, - firewall_id: str, - *, - droplet_ids: Iterable[int], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To assign a Droplet to a firewall, send a POST request to - `/v2/firewalls/$FIREWALL_ID/droplets`. In the body of the request, there should - be a `droplet_ids` attribute containing a list of Droplet IDs. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - droplet_ids: An array containing the IDs of the Droplets to be assigned to the firewall. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._post( - f"/v2/firewalls/{firewall_id}/droplets" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/droplets", - body=maybe_transform({"droplet_ids": droplet_ids}, droplet_add_params.DropletAddParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - def remove( - self, - firewall_id: str, - *, - droplet_ids: Iterable[int], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To remove a Droplet from a firewall, send a DELETE request to - `/v2/firewalls/$FIREWALL_ID/droplets`. In the body of the request, there should - be a `droplet_ids` attribute containing a list of Droplet IDs. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - droplet_ids: An array containing the IDs of the Droplets to be removed from the firewall. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/v2/firewalls/{firewall_id}/droplets" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/droplets", - body=maybe_transform({"droplet_ids": droplet_ids}, droplet_remove_params.DropletRemoveParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class AsyncDropletsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncDropletsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncDropletsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncDropletsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncDropletsResourceWithStreamingResponse(self) - - async def add( - self, - firewall_id: str, - *, - droplet_ids: Iterable[int], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To assign a Droplet to a firewall, send a POST request to - `/v2/firewalls/$FIREWALL_ID/droplets`. In the body of the request, there should - be a `droplet_ids` attribute containing a list of Droplet IDs. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - droplet_ids: An array containing the IDs of the Droplets to be assigned to the firewall. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._post( - f"/v2/firewalls/{firewall_id}/droplets" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/droplets", - body=await async_maybe_transform({"droplet_ids": droplet_ids}, droplet_add_params.DropletAddParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - async def remove( - self, - firewall_id: str, - *, - droplet_ids: Iterable[int], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To remove a Droplet from a firewall, send a DELETE request to - `/v2/firewalls/$FIREWALL_ID/droplets`. In the body of the request, there should - be a `droplet_ids` attribute containing a list of Droplet IDs. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - droplet_ids: An array containing the IDs of the Droplets to be removed from the firewall. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/v2/firewalls/{firewall_id}/droplets" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/droplets", - body=await async_maybe_transform({"droplet_ids": droplet_ids}, droplet_remove_params.DropletRemoveParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class DropletsResourceWithRawResponse: - def __init__(self, droplets: DropletsResource) -> None: - self._droplets = droplets - - self.add = to_raw_response_wrapper( - droplets.add, - ) - self.remove = to_raw_response_wrapper( - droplets.remove, - ) - - -class AsyncDropletsResourceWithRawResponse: - def __init__(self, droplets: AsyncDropletsResource) -> None: - self._droplets = droplets - - self.add = async_to_raw_response_wrapper( - droplets.add, - ) - self.remove = async_to_raw_response_wrapper( - droplets.remove, - ) - - -class DropletsResourceWithStreamingResponse: - def __init__(self, droplets: DropletsResource) -> None: - self._droplets = droplets - - self.add = to_streamed_response_wrapper( - droplets.add, - ) - self.remove = to_streamed_response_wrapper( - droplets.remove, - ) - - -class AsyncDropletsResourceWithStreamingResponse: - def __init__(self, droplets: AsyncDropletsResource) -> None: - self._droplets = droplets - - self.add = async_to_streamed_response_wrapper( - droplets.add, - ) - self.remove = async_to_streamed_response_wrapper( - droplets.remove, - ) diff --git 
a/src/gradientai/resources/firewalls/firewalls.py b/src/gradientai/resources/firewalls/firewalls.py deleted file mode 100644 index f59cd64d..00000000 --- a/src/gradientai/resources/firewalls/firewalls.py +++ /dev/null @@ -1,647 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from .tags import ( - TagsResource, - AsyncTagsResource, - TagsResourceWithRawResponse, - AsyncTagsResourceWithRawResponse, - TagsResourceWithStreamingResponse, - AsyncTagsResourceWithStreamingResponse, -) -from .rules import ( - RulesResource, - AsyncRulesResource, - RulesResourceWithRawResponse, - AsyncRulesResourceWithRawResponse, - RulesResourceWithStreamingResponse, - AsyncRulesResourceWithStreamingResponse, -) -from ...types import firewall_list_params, firewall_create_params, firewall_update_params -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from .droplets import ( - DropletsResource, - AsyncDropletsResource, - DropletsResourceWithRawResponse, - AsyncDropletsResourceWithRawResponse, - DropletsResourceWithStreamingResponse, - AsyncDropletsResourceWithStreamingResponse, -) -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.firewall_param import FirewallParam -from ...types.firewall_list_response import FirewallListResponse -from ...types.firewall_create_response import FirewallCreateResponse -from ...types.firewall_update_response import FirewallUpdateResponse -from ...types.firewall_retrieve_response import FirewallRetrieveResponse - -__all__ = ["FirewallsResource", "AsyncFirewallsResource"] - - -class 
FirewallsResource(SyncAPIResource): - @cached_property - def droplets(self) -> DropletsResource: - return DropletsResource(self._client) - - @cached_property - def tags(self) -> TagsResource: - return TagsResource(self._client) - - @cached_property - def rules(self) -> RulesResource: - return RulesResource(self._client) - - @cached_property - def with_raw_response(self) -> FirewallsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return FirewallsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> FirewallsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return FirewallsResourceWithStreamingResponse(self) - - def create( - self, - *, - body: firewall_create_params.Body | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FirewallCreateResponse: - """To create a new firewall, send a POST request to `/v2/firewalls`. - - The request - must contain at least one inbound or outbound access rule. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/v2/firewalls" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/firewalls", - body=maybe_transform(body, firewall_create_params.FirewallCreateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FirewallCreateResponse, - ) - - def retrieve( - self, - firewall_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FirewallRetrieveResponse: - """ - To show information about an existing firewall, send a GET request to - `/v2/firewalls/$FIREWALL_ID`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - return self._get( - f"/v2/firewalls/{firewall_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FirewallRetrieveResponse, - ) - - def update( - self, - firewall_id: str, - *, - firewall: FirewallParam, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FirewallUpdateResponse: - """ - To update the configuration of an existing firewall, send a PUT request to - `/v2/firewalls/$FIREWALL_ID`. The request should contain a full representation - of the firewall including existing attributes. **Note that any attributes that - are not provided will be reset to their default values.** - - You must have read access (e.g. `droplet:read`) to all resources attached to the - firewall to successfully update the firewall. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - return self._put( - f"/v2/firewalls/{firewall_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", - body=maybe_transform(firewall, firewall_update_params.FirewallUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FirewallUpdateResponse, - ) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FirewallListResponse: - """ - To list all of the firewalls available on your account, send a GET request to - `/v2/firewalls`. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/firewalls" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/firewalls", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - firewall_list_params.FirewallListParams, - ), - ), - cast_to=FirewallListResponse, - ) - - def delete( - self, - firewall_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete a firewall send a DELETE request to `/v2/firewalls/$FIREWALL_ID`. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/v2/firewalls/{firewall_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class AsyncFirewallsResource(AsyncAPIResource): - @cached_property - def droplets(self) -> AsyncDropletsResource: - return AsyncDropletsResource(self._client) - - @cached_property - def tags(self) -> AsyncTagsResource: - return AsyncTagsResource(self._client) - - @cached_property - def rules(self) -> AsyncRulesResource: - return AsyncRulesResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncFirewallsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncFirewallsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncFirewallsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncFirewallsResourceWithStreamingResponse(self) - - async def create( - self, - *, - body: firewall_create_params.Body | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FirewallCreateResponse: - """To create a new firewall, send a POST request to `/v2/firewalls`. - - The request - must contain at least one inbound or outbound access rule. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/v2/firewalls" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/firewalls", - body=await async_maybe_transform(body, firewall_create_params.FirewallCreateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FirewallCreateResponse, - ) - - async def retrieve( - self, - firewall_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FirewallRetrieveResponse: - """ - To show information about an existing firewall, send a GET request to - `/v2/firewalls/$FIREWALL_ID`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - return await self._get( - f"/v2/firewalls/{firewall_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FirewallRetrieveResponse, - ) - - async def update( - self, - firewall_id: str, - *, - firewall: FirewallParam, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FirewallUpdateResponse: - """ - To update the configuration of an existing firewall, send a PUT request to - `/v2/firewalls/$FIREWALL_ID`. The request should contain a full representation - of the firewall including existing attributes. **Note that any attributes that - are not provided will be reset to their default values.** - - You must have read access (e.g. `droplet:read`) to all resources attached to the - firewall to successfully update the firewall. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - return await self._put( - f"/v2/firewalls/{firewall_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", - body=await async_maybe_transform(firewall, firewall_update_params.FirewallUpdateParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FirewallUpdateResponse, - ) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FirewallListResponse: - """ - To list all of the firewalls available on your account, send a GET request to - `/v2/firewalls`. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/firewalls" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/firewalls", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - firewall_list_params.FirewallListParams, - ), - ), - cast_to=FirewallListResponse, - ) - - async def delete( - self, - firewall_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete a firewall send a DELETE request to `/v2/firewalls/$FIREWALL_ID`. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/v2/firewalls/{firewall_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class FirewallsResourceWithRawResponse: - def __init__(self, firewalls: FirewallsResource) -> None: - self._firewalls = firewalls - - self.create = to_raw_response_wrapper( - firewalls.create, - ) - self.retrieve = to_raw_response_wrapper( - firewalls.retrieve, - ) - self.update = to_raw_response_wrapper( - firewalls.update, - ) - self.list = to_raw_response_wrapper( - firewalls.list, - ) - self.delete = to_raw_response_wrapper( - firewalls.delete, - ) - - @cached_property - def droplets(self) -> DropletsResourceWithRawResponse: - return DropletsResourceWithRawResponse(self._firewalls.droplets) - - @cached_property - def tags(self) -> TagsResourceWithRawResponse: - return TagsResourceWithRawResponse(self._firewalls.tags) - - @cached_property - def rules(self) -> RulesResourceWithRawResponse: - return RulesResourceWithRawResponse(self._firewalls.rules) - - -class AsyncFirewallsResourceWithRawResponse: - def __init__(self, firewalls: AsyncFirewallsResource) -> None: - self._firewalls = firewalls - - self.create = async_to_raw_response_wrapper( - firewalls.create, - ) - self.retrieve = async_to_raw_response_wrapper( - firewalls.retrieve, - ) - self.update = async_to_raw_response_wrapper( - firewalls.update, - ) 
- self.list = async_to_raw_response_wrapper( - firewalls.list, - ) - self.delete = async_to_raw_response_wrapper( - firewalls.delete, - ) - - @cached_property - def droplets(self) -> AsyncDropletsResourceWithRawResponse: - return AsyncDropletsResourceWithRawResponse(self._firewalls.droplets) - - @cached_property - def tags(self) -> AsyncTagsResourceWithRawResponse: - return AsyncTagsResourceWithRawResponse(self._firewalls.tags) - - @cached_property - def rules(self) -> AsyncRulesResourceWithRawResponse: - return AsyncRulesResourceWithRawResponse(self._firewalls.rules) - - -class FirewallsResourceWithStreamingResponse: - def __init__(self, firewalls: FirewallsResource) -> None: - self._firewalls = firewalls - - self.create = to_streamed_response_wrapper( - firewalls.create, - ) - self.retrieve = to_streamed_response_wrapper( - firewalls.retrieve, - ) - self.update = to_streamed_response_wrapper( - firewalls.update, - ) - self.list = to_streamed_response_wrapper( - firewalls.list, - ) - self.delete = to_streamed_response_wrapper( - firewalls.delete, - ) - - @cached_property - def droplets(self) -> DropletsResourceWithStreamingResponse: - return DropletsResourceWithStreamingResponse(self._firewalls.droplets) - - @cached_property - def tags(self) -> TagsResourceWithStreamingResponse: - return TagsResourceWithStreamingResponse(self._firewalls.tags) - - @cached_property - def rules(self) -> RulesResourceWithStreamingResponse: - return RulesResourceWithStreamingResponse(self._firewalls.rules) - - -class AsyncFirewallsResourceWithStreamingResponse: - def __init__(self, firewalls: AsyncFirewallsResource) -> None: - self._firewalls = firewalls - - self.create = async_to_streamed_response_wrapper( - firewalls.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - firewalls.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - firewalls.update, - ) - self.list = async_to_streamed_response_wrapper( - firewalls.list, - ) - self.delete = 
async_to_streamed_response_wrapper( - firewalls.delete, - ) - - @cached_property - def droplets(self) -> AsyncDropletsResourceWithStreamingResponse: - return AsyncDropletsResourceWithStreamingResponse(self._firewalls.droplets) - - @cached_property - def tags(self) -> AsyncTagsResourceWithStreamingResponse: - return AsyncTagsResourceWithStreamingResponse(self._firewalls.tags) - - @cached_property - def rules(self) -> AsyncRulesResourceWithStreamingResponse: - return AsyncRulesResourceWithStreamingResponse(self._firewalls.rules) diff --git a/src/gradientai/resources/firewalls/rules.py b/src/gradientai/resources/firewalls/rules.py deleted file mode 100644 index 756cd0bd..00000000 --- a/src/gradientai/resources/firewalls/rules.py +++ /dev/null @@ -1,320 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Iterable, Optional - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.firewalls import rule_add_params, rule_remove_params - -__all__ = ["RulesResource", "AsyncRulesResource"] - - -class RulesResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> RulesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return RulesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> RulesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return RulesResourceWithStreamingResponse(self) - - def add( - self, - firewall_id: str, - *, - inbound_rules: Optional[Iterable[rule_add_params.InboundRule]] | NotGiven = NOT_GIVEN, - outbound_rules: Optional[Iterable[rule_add_params.OutboundRule]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To add additional access rules to a firewall, send a POST request to - `/v2/firewalls/$FIREWALL_ID/rules`. The body of the request may include an - inbound_rules and/or outbound_rules attribute containing an array of rules to be - added. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._post( - f"/v2/firewalls/{firewall_id}/rules" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/rules", - body=maybe_transform( - { - "inbound_rules": inbound_rules, - "outbound_rules": outbound_rules, - }, - rule_add_params.RuleAddParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - def remove( - self, - firewall_id: str, - *, - inbound_rules: Optional[Iterable[rule_remove_params.InboundRule]] | NotGiven = NOT_GIVEN, - outbound_rules: Optional[Iterable[rule_remove_params.OutboundRule]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To remove access rules from a firewall, send a DELETE request to - `/v2/firewalls/$FIREWALL_ID/rules`. The body of the request may include an - `inbound_rules` and/or `outbound_rules` attribute containing an array of rules - to be removed. - - No response body will be sent back, but the response code will indicate success. 
- Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/v2/firewalls/{firewall_id}/rules" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/rules", - body=maybe_transform( - { - "inbound_rules": inbound_rules, - "outbound_rules": outbound_rules, - }, - rule_remove_params.RuleRemoveParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class AsyncRulesResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncRulesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncRulesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncRulesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncRulesResourceWithStreamingResponse(self) - - async def add( - self, - firewall_id: str, - *, - inbound_rules: Optional[Iterable[rule_add_params.InboundRule]] | NotGiven = NOT_GIVEN, - outbound_rules: Optional[Iterable[rule_add_params.OutboundRule]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To add additional access rules to a firewall, send a POST request to - `/v2/firewalls/$FIREWALL_ID/rules`. The body of the request may include an - inbound_rules and/or outbound_rules attribute containing an array of rules to be - added. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._post( - f"/v2/firewalls/{firewall_id}/rules" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/rules", - body=await async_maybe_transform( - { - "inbound_rules": inbound_rules, - "outbound_rules": outbound_rules, - }, - rule_add_params.RuleAddParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - async def remove( - self, - firewall_id: str, - *, - inbound_rules: Optional[Iterable[rule_remove_params.InboundRule]] | NotGiven = NOT_GIVEN, - outbound_rules: Optional[Iterable[rule_remove_params.OutboundRule]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To remove access rules from a firewall, send a DELETE request to - `/v2/firewalls/$FIREWALL_ID/rules`. The body of the request may include an - `inbound_rules` and/or `outbound_rules` attribute containing an array of rules - to be removed. - - No response body will be sent back, but the response code will indicate success. 
- Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/v2/firewalls/{firewall_id}/rules" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/rules", - body=await async_maybe_transform( - { - "inbound_rules": inbound_rules, - "outbound_rules": outbound_rules, - }, - rule_remove_params.RuleRemoveParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class RulesResourceWithRawResponse: - def __init__(self, rules: RulesResource) -> None: - self._rules = rules - - self.add = to_raw_response_wrapper( - rules.add, - ) - self.remove = to_raw_response_wrapper( - rules.remove, - ) - - -class AsyncRulesResourceWithRawResponse: - def __init__(self, rules: AsyncRulesResource) -> None: - self._rules = rules - - self.add = async_to_raw_response_wrapper( - rules.add, - ) - self.remove = async_to_raw_response_wrapper( - rules.remove, - ) - - -class RulesResourceWithStreamingResponse: - def __init__(self, rules: RulesResource) -> None: - self._rules = rules - - self.add = to_streamed_response_wrapper( - rules.add, - ) - self.remove = to_streamed_response_wrapper( - rules.remove, - ) - - -class AsyncRulesResourceWithStreamingResponse: - def __init__(self, rules: AsyncRulesResource) -> None: - self._rules = rules - - self.add = async_to_streamed_response_wrapper( - rules.add, - ) - 
self.remove = async_to_streamed_response_wrapper( - rules.remove, - ) diff --git a/src/gradientai/resources/firewalls/tags.py b/src/gradientai/resources/firewalls/tags.py deleted file mode 100644 index 966015ea..00000000 --- a/src/gradientai/resources/firewalls/tags.py +++ /dev/null @@ -1,308 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Optional - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.firewalls import tag_add_params, tag_remove_params - -__all__ = ["TagsResource", "AsyncTagsResource"] - - -class TagsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> TagsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return TagsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> TagsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return TagsResourceWithStreamingResponse(self) - - def add( - self, - firewall_id: str, - *, - tags: Optional[List[str]], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To assign a tag representing a group of Droplets to a firewall, send a POST - request to `/v2/firewalls/$FIREWALL_ID/tags`. In the body of the request, there - should be a `tags` attribute containing a list of tag names. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - tags: A flat array of tag names as strings to be applied to the resource. Tag names - must exist in order to be referenced in a request. - - Requires `tag:create` and `tag:read` scopes. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._post( - f"/v2/firewalls/{firewall_id}/tags" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/tags", - body=maybe_transform({"tags": tags}, tag_add_params.TagAddParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - def remove( - self, - firewall_id: str, - *, - tags: Optional[List[str]], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To remove a tag representing a group of Droplets from a firewall, send a DELETE - request to `/v2/firewalls/$FIREWALL_ID/tags`. In the body of the request, there - should be a `tags` attribute containing a list of tag names. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - tags: A flat array of tag names as strings to be applied to the resource. Tag names - must exist in order to be referenced in a request. - - Requires `tag:create` and `tag:read` scopes. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/v2/firewalls/{firewall_id}/tags" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/tags", - body=maybe_transform({"tags": tags}, tag_remove_params.TagRemoveParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class AsyncTagsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncTagsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncTagsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncTagsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncTagsResourceWithStreamingResponse(self) - - async def add( - self, - firewall_id: str, - *, - tags: Optional[List[str]], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To assign a tag representing a group of Droplets to a firewall, send a POST - request to `/v2/firewalls/$FIREWALL_ID/tags`. In the body of the request, there - should be a `tags` attribute containing a list of tag names. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - tags: A flat array of tag names as strings to be applied to the resource. Tag names - must exist in order to be referenced in a request. - - Requires `tag:create` and `tag:read` scopes. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._post( - f"/v2/firewalls/{firewall_id}/tags" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/tags", - body=await async_maybe_transform({"tags": tags}, tag_add_params.TagAddParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - async def remove( - self, - firewall_id: str, - *, - tags: Optional[List[str]], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To remove a tag representing a group of Droplets from a firewall, send a DELETE - request to `/v2/firewalls/$FIREWALL_ID/tags`. In the body of the request, there - should be a `tags` attribute containing a list of tag names. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - tags: A flat array of tag names as strings to be applied to the resource. Tag names - must exist in order to be referenced in a request. - - Requires `tag:create` and `tag:read` scopes. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not firewall_id: - raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/v2/firewalls/{firewall_id}/tags" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/tags", - body=await async_maybe_transform({"tags": tags}, tag_remove_params.TagRemoveParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class TagsResourceWithRawResponse: - def __init__(self, tags: TagsResource) -> None: - self._tags = tags - - self.add = to_raw_response_wrapper( - tags.add, - ) - self.remove = to_raw_response_wrapper( - tags.remove, - ) - - -class AsyncTagsResourceWithRawResponse: - def __init__(self, tags: AsyncTagsResource) -> 
None: - self._tags = tags - - self.add = async_to_raw_response_wrapper( - tags.add, - ) - self.remove = async_to_raw_response_wrapper( - tags.remove, - ) - - -class TagsResourceWithStreamingResponse: - def __init__(self, tags: TagsResource) -> None: - self._tags = tags - - self.add = to_streamed_response_wrapper( - tags.add, - ) - self.remove = to_streamed_response_wrapper( - tags.remove, - ) - - -class AsyncTagsResourceWithStreamingResponse: - def __init__(self, tags: AsyncTagsResource) -> None: - self._tags = tags - - self.add = async_to_streamed_response_wrapper( - tags.add, - ) - self.remove = async_to_streamed_response_wrapper( - tags.remove, - ) diff --git a/src/gradientai/resources/floating_ips/__init__.py b/src/gradientai/resources/floating_ips/__init__.py deleted file mode 100644 index bf6871b1..00000000 --- a/src/gradientai/resources/floating_ips/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .actions import ( - ActionsResource, - AsyncActionsResource, - ActionsResourceWithRawResponse, - AsyncActionsResourceWithRawResponse, - ActionsResourceWithStreamingResponse, - AsyncActionsResourceWithStreamingResponse, -) -from .floating_ips import ( - FloatingIPsResource, - AsyncFloatingIPsResource, - FloatingIPsResourceWithRawResponse, - AsyncFloatingIPsResourceWithRawResponse, - FloatingIPsResourceWithStreamingResponse, - AsyncFloatingIPsResourceWithStreamingResponse, -) - -__all__ = [ - "ActionsResource", - "AsyncActionsResource", - "ActionsResourceWithRawResponse", - "AsyncActionsResourceWithRawResponse", - "ActionsResourceWithStreamingResponse", - "AsyncActionsResourceWithStreamingResponse", - "FloatingIPsResource", - "AsyncFloatingIPsResource", - "FloatingIPsResourceWithRawResponse", - "AsyncFloatingIPsResourceWithRawResponse", - "FloatingIPsResourceWithStreamingResponse", - "AsyncFloatingIPsResourceWithStreamingResponse", -] diff --git 
a/src/gradientai/resources/floating_ips/actions.py b/src/gradientai/resources/floating_ips/actions.py deleted file mode 100644 index 69b9b67e..00000000 --- a/src/gradientai/resources/floating_ips/actions.py +++ /dev/null @@ -1,489 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, overload - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import required_args, maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.floating_ips import action_create_params -from ...types.floating_ips.action_list_response import ActionListResponse -from ...types.floating_ips.action_create_response import ActionCreateResponse -from ...types.floating_ips.action_retrieve_response import ActionRetrieveResponse - -__all__ = ["ActionsResource", "AsyncActionsResource"] - - -class ActionsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ActionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return ActionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ActionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return ActionsResourceWithStreamingResponse(self) - - @overload - def create( - self, - floating_ip: str, - *, - type: Literal["assign", "unassign"], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionCreateResponse: - """ - To initiate an action on a floating IP send a POST request to - `/v2/floating_ips/$FLOATING_IP/actions`. In the JSON body to the request, set - the `type` attribute to on of the supported action types: - - | Action | Details | - | ---------- | ------------------------------------- | - | `assign` | Assigns a floating IP to a Droplet | - | `unassign` | Unassign a floating IP from a Droplet | - - Args: - type: The type of action to initiate for the floating IP. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - def create( - self, - floating_ip: str, - *, - droplet_id: int, - type: Literal["assign", "unassign"], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionCreateResponse: - """ - To initiate an action on a floating IP send a POST request to - `/v2/floating_ips/$FLOATING_IP/actions`. In the JSON body to the request, set - the `type` attribute to on of the supported action types: - - | Action | Details | - | ---------- | ------------------------------------- | - | `assign` | Assigns a floating IP to a Droplet | - | `unassign` | Unassign a floating IP from a Droplet | - - Args: - droplet_id: The ID of the Droplet that the floating IP will be assigned to. - - type: The type of action to initiate for the floating IP. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["type"], ["droplet_id", "type"]) - def create( - self, - floating_ip: str, - *, - type: Literal["assign", "unassign"], - droplet_id: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionCreateResponse: - if not floating_ip: - raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") - return self._post( - f"/v2/floating_ips/{floating_ip}/actions" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions", - body=maybe_transform( - { - "type": type, - "droplet_id": droplet_id, - }, - action_create_params.ActionCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ActionCreateResponse, - ) - - def retrieve( - self, - action_id: int, - *, - floating_ip: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionRetrieveResponse: - """ - To retrieve the status of a floating IP action, send a GET request to - `/v2/floating_ips/$FLOATING_IP/actions/$ACTION_ID`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not floating_ip: - raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") - return self._get( - f"/v2/floating_ips/{floating_ip}/actions/{action_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions/{action_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ActionRetrieveResponse, - ) - - def list( - self, - floating_ip: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionListResponse: - """ - To retrieve all actions that have been executed on a floating IP, send a GET - request to `/v2/floating_ips/$FLOATING_IP/actions`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not floating_ip: - raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") - return self._get( - f"/v2/floating_ips/{floating_ip}/actions" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ActionListResponse, - ) - - -class AsyncActionsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncActionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncActionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncActionsResourceWithStreamingResponse(self) - - @overload - async def create( - self, - floating_ip: str, - *, - type: Literal["assign", "unassign"], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionCreateResponse: - """ - To initiate an action on a floating IP send a POST request to - `/v2/floating_ips/$FLOATING_IP/actions`. In the JSON body to the request, set - the `type` attribute to on of the supported action types: - - | Action | Details | - | ---------- | ------------------------------------- | - | `assign` | Assigns a floating IP to a Droplet | - | `unassign` | Unassign a floating IP from a Droplet | - - Args: - type: The type of action to initiate for the floating IP. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - async def create( - self, - floating_ip: str, - *, - droplet_id: int, - type: Literal["assign", "unassign"], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionCreateResponse: - """ - To initiate an action on a floating IP send a POST request to - `/v2/floating_ips/$FLOATING_IP/actions`. In the JSON body to the request, set - the `type` attribute to on of the supported action types: - - | Action | Details | - | ---------- | ------------------------------------- | - | `assign` | Assigns a floating IP to a Droplet | - | `unassign` | Unassign a floating IP from a Droplet | - - Args: - droplet_id: The ID of the Droplet that the floating IP will be assigned to. 
- - type: The type of action to initiate for the floating IP. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["type"], ["droplet_id", "type"]) - async def create( - self, - floating_ip: str, - *, - type: Literal["assign", "unassign"], - droplet_id: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionCreateResponse: - if not floating_ip: - raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") - return await self._post( - f"/v2/floating_ips/{floating_ip}/actions" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions", - body=await async_maybe_transform( - { - "type": type, - "droplet_id": droplet_id, - }, - action_create_params.ActionCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ActionCreateResponse, - ) - - async def retrieve( - self, - action_id: int, - *, - floating_ip: str, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionRetrieveResponse: - """ - To retrieve the status of a floating IP action, send a GET request to - `/v2/floating_ips/$FLOATING_IP/actions/$ACTION_ID`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not floating_ip: - raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") - return await self._get( - f"/v2/floating_ips/{floating_ip}/actions/{action_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions/{action_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ActionRetrieveResponse, - ) - - async def list( - self, - floating_ip: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionListResponse: - """ - To retrieve all actions that have been executed on a floating IP, send a GET - request to `/v2/floating_ips/$FLOATING_IP/actions`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not floating_ip: - raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") - return await self._get( - f"/v2/floating_ips/{floating_ip}/actions" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ActionListResponse, - ) - - -class ActionsResourceWithRawResponse: - def __init__(self, actions: ActionsResource) -> None: - self._actions = actions - - self.create = to_raw_response_wrapper( - actions.create, - ) - self.retrieve = to_raw_response_wrapper( - actions.retrieve, - ) - self.list = to_raw_response_wrapper( - actions.list, - ) - - -class AsyncActionsResourceWithRawResponse: - def __init__(self, actions: AsyncActionsResource) -> None: - self._actions = actions - - self.create = async_to_raw_response_wrapper( - actions.create, - ) - self.retrieve = async_to_raw_response_wrapper( - actions.retrieve, - ) - self.list = async_to_raw_response_wrapper( - actions.list, - ) - - -class ActionsResourceWithStreamingResponse: - def __init__(self, actions: ActionsResource) -> None: - self._actions = actions - - self.create = to_streamed_response_wrapper( - actions.create, - ) - self.retrieve = to_streamed_response_wrapper( - actions.retrieve, - ) - self.list = to_streamed_response_wrapper( - actions.list, - ) - - -class AsyncActionsResourceWithStreamingResponse: - def __init__(self, actions: AsyncActionsResource) -> None: - self._actions = actions - - self.create = async_to_streamed_response_wrapper( - actions.create, - ) - self.retrieve = 
async_to_streamed_response_wrapper( - actions.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - actions.list, - ) diff --git a/src/gradientai/resources/floating_ips/floating_ips.py b/src/gradientai/resources/floating_ips/floating_ips.py deleted file mode 100644 index 0cc083b6..00000000 --- a/src/gradientai/resources/floating_ips/floating_ips.py +++ /dev/null @@ -1,635 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import overload - -import httpx - -from ...types import floating_ip_list_params, floating_ip_create_params -from .actions import ( - ActionsResource, - AsyncActionsResource, - ActionsResourceWithRawResponse, - AsyncActionsResourceWithRawResponse, - ActionsResourceWithStreamingResponse, - AsyncActionsResourceWithStreamingResponse, -) -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from ..._utils import required_args, maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.floating_ip_list_response import FloatingIPListResponse -from ...types.floating_ip_create_response import FloatingIPCreateResponse -from ...types.floating_ip_retrieve_response import FloatingIPRetrieveResponse - -__all__ = ["FloatingIPsResource", "AsyncFloatingIPsResource"] - - -class FloatingIPsResource(SyncAPIResource): - @cached_property - def actions(self) -> ActionsResource: - return ActionsResource(self._client) - - @cached_property - def with_raw_response(self) -> FloatingIPsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed 
content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return FloatingIPsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> FloatingIPsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return FloatingIPsResourceWithStreamingResponse(self) - - @overload - def create( - self, - *, - droplet_id: int, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FloatingIPCreateResponse: - """ - On creation, a floating IP must be either assigned to a Droplet or reserved to a - region. - - - To create a new floating IP assigned to a Droplet, send a POST request to - `/v2/floating_ips` with the `droplet_id` attribute. - - - To create a new floating IP reserved to a region, send a POST request to - `/v2/floating_ips` with the `region` attribute. - - **Note**: In addition to the standard rate limiting, only 12 floating IPs may be - created per 60 seconds. - - Args: - droplet_id: The ID of the Droplet that the floating IP will be assigned to. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @overload - def create( - self, - *, - region: str, - project_id: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FloatingIPCreateResponse: - """ - On creation, a floating IP must be either assigned to a Droplet or reserved to a - region. - - - To create a new floating IP assigned to a Droplet, send a POST request to - `/v2/floating_ips` with the `droplet_id` attribute. - - - To create a new floating IP reserved to a region, send a POST request to - `/v2/floating_ips` with the `region` attribute. - - **Note**: In addition to the standard rate limiting, only 12 floating IPs may be - created per 60 seconds. - - Args: - region: The slug identifier for the region the floating IP will be reserved to. - - project_id: The UUID of the project to which the floating IP will be assigned. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["droplet_id"], ["region"]) - def create( - self, - *, - droplet_id: int | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FloatingIPCreateResponse: - return self._post( - "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips", - body=maybe_transform( - { - "droplet_id": droplet_id, - "region": region, - "project_id": project_id, - }, - floating_ip_create_params.FloatingIPCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FloatingIPCreateResponse, - ) - - def retrieve( - self, - floating_ip: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FloatingIPRetrieveResponse: - """ - To show information about a floating IP, send a GET request to - `/v2/floating_ips/$FLOATING_IP_ADDR`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not floating_ip: - raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") - return self._get( - f"/v2/floating_ips/{floating_ip}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FloatingIPRetrieveResponse, - ) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FloatingIPListResponse: - """ - To list all of the floating IPs available on your account, send a GET request to - `/v2/floating_ips`. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - floating_ip_list_params.FloatingIPListParams, - ), - ), - cast_to=FloatingIPListResponse, - ) - - def delete( - self, - floating_ip: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete a floating IP and remove it from your account, send a DELETE request - to `/v2/floating_ips/$FLOATING_IP_ADDR`. - - A successful request will receive a 204 status code with no body in response. - This indicates that the request was processed successfully. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not floating_ip: - raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/v2/floating_ips/{floating_ip}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class AsyncFloatingIPsResource(AsyncAPIResource): - @cached_property - def actions(self) -> AsyncActionsResource: - return AsyncActionsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncFloatingIPsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncFloatingIPsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncFloatingIPsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncFloatingIPsResourceWithStreamingResponse(self) - - @overload - async def create( - self, - *, - droplet_id: int, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FloatingIPCreateResponse: - """ - On creation, a floating IP must be either assigned to a Droplet or reserved to a - region. - - - To create a new floating IP assigned to a Droplet, send a POST request to - `/v2/floating_ips` with the `droplet_id` attribute. - - - To create a new floating IP reserved to a region, send a POST request to - `/v2/floating_ips` with the `region` attribute. - - **Note**: In addition to the standard rate limiting, only 12 floating IPs may be - created per 60 seconds. - - Args: - droplet_id: The ID of the Droplet that the floating IP will be assigned to. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - async def create( - self, - *, - region: str, - project_id: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FloatingIPCreateResponse: - """ - On creation, a floating IP must be either assigned to a Droplet or reserved to a - region. - - - To create a new floating IP assigned to a Droplet, send a POST request to - `/v2/floating_ips` with the `droplet_id` attribute. 
- - - To create a new floating IP reserved to a region, send a POST request to - `/v2/floating_ips` with the `region` attribute. - - **Note**: In addition to the standard rate limiting, only 12 floating IPs may be - created per 60 seconds. - - Args: - region: The slug identifier for the region the floating IP will be reserved to. - - project_id: The UUID of the project to which the floating IP will be assigned. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["droplet_id"], ["region"]) - async def create( - self, - *, - droplet_id: int | NotGiven = NOT_GIVEN, - region: str | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FloatingIPCreateResponse: - return await self._post( - "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips", - body=await async_maybe_transform( - { - "droplet_id": droplet_id, - "region": region, - "project_id": project_id, - }, - floating_ip_create_params.FloatingIPCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FloatingIPCreateResponse, - ) - - async def retrieve( - self, - floating_ip: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FloatingIPRetrieveResponse: - """ - To show information about a floating IP, send a GET request to - `/v2/floating_ips/$FLOATING_IP_ADDR`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not floating_ip: - raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") - return await self._get( - f"/v2/floating_ips/{floating_ip}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=FloatingIPRetrieveResponse, - ) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> FloatingIPListResponse: - """ - To list all of the floating IPs available on your account, send a GET request to - `/v2/floating_ips`. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - floating_ip_list_params.FloatingIPListParams, - ), - ), - cast_to=FloatingIPListResponse, - ) - - async def delete( - self, - floating_ip: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete a floating IP and remove it from your account, send a DELETE request - to `/v2/floating_ips/$FLOATING_IP_ADDR`. - - A successful request will receive a 204 status code with no body in response. - This indicates that the request was processed successfully. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not floating_ip: - raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/v2/floating_ips/{floating_ip}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class FloatingIPsResourceWithRawResponse: - def __init__(self, floating_ips: FloatingIPsResource) -> None: - self._floating_ips = floating_ips - - self.create = to_raw_response_wrapper( - floating_ips.create, - ) - self.retrieve = to_raw_response_wrapper( - floating_ips.retrieve, - ) - self.list = to_raw_response_wrapper( - floating_ips.list, - ) - self.delete = to_raw_response_wrapper( - floating_ips.delete, - ) - - @cached_property - def actions(self) -> ActionsResourceWithRawResponse: - return ActionsResourceWithRawResponse(self._floating_ips.actions) - - -class AsyncFloatingIPsResourceWithRawResponse: - def __init__(self, floating_ips: AsyncFloatingIPsResource) -> None: - self._floating_ips = floating_ips - - self.create = async_to_raw_response_wrapper( - floating_ips.create, - ) - self.retrieve = async_to_raw_response_wrapper( - floating_ips.retrieve, - ) - self.list = async_to_raw_response_wrapper( - floating_ips.list, - ) - self.delete = async_to_raw_response_wrapper( - floating_ips.delete, - ) - - @cached_property - def actions(self) -> AsyncActionsResourceWithRawResponse: - return AsyncActionsResourceWithRawResponse(self._floating_ips.actions) - - -class 
FloatingIPsResourceWithStreamingResponse: - def __init__(self, floating_ips: FloatingIPsResource) -> None: - self._floating_ips = floating_ips - - self.create = to_streamed_response_wrapper( - floating_ips.create, - ) - self.retrieve = to_streamed_response_wrapper( - floating_ips.retrieve, - ) - self.list = to_streamed_response_wrapper( - floating_ips.list, - ) - self.delete = to_streamed_response_wrapper( - floating_ips.delete, - ) - - @cached_property - def actions(self) -> ActionsResourceWithStreamingResponse: - return ActionsResourceWithStreamingResponse(self._floating_ips.actions) - - -class AsyncFloatingIPsResourceWithStreamingResponse: - def __init__(self, floating_ips: AsyncFloatingIPsResource) -> None: - self._floating_ips = floating_ips - - self.create = async_to_streamed_response_wrapper( - floating_ips.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - floating_ips.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - floating_ips.list, - ) - self.delete = async_to_streamed_response_wrapper( - floating_ips.delete, - ) - - @cached_property - def actions(self) -> AsyncActionsResourceWithStreamingResponse: - return AsyncActionsResourceWithStreamingResponse(self._floating_ips.actions) diff --git a/src/gradientai/resources/images/__init__.py b/src/gradientai/resources/images/__init__.py deleted file mode 100644 index 477fd657..00000000 --- a/src/gradientai/resources/images/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from .images import ( - ImagesResource, - AsyncImagesResource, - ImagesResourceWithRawResponse, - AsyncImagesResourceWithRawResponse, - ImagesResourceWithStreamingResponse, - AsyncImagesResourceWithStreamingResponse, -) -from .actions import ( - ActionsResource, - AsyncActionsResource, - ActionsResourceWithRawResponse, - AsyncActionsResourceWithRawResponse, - ActionsResourceWithStreamingResponse, - AsyncActionsResourceWithStreamingResponse, -) - -__all__ = [ - "ActionsResource", - "AsyncActionsResource", - "ActionsResourceWithRawResponse", - "AsyncActionsResourceWithRawResponse", - "ActionsResourceWithStreamingResponse", - "AsyncActionsResourceWithStreamingResponse", - "ImagesResource", - "AsyncImagesResource", - "ImagesResourceWithRawResponse", - "AsyncImagesResourceWithRawResponse", - "ImagesResourceWithStreamingResponse", - "AsyncImagesResourceWithStreamingResponse", -] diff --git a/src/gradientai/resources/images/actions.py b/src/gradientai/resources/images/actions.py deleted file mode 100644 index 3f4b4384..00000000 --- a/src/gradientai/resources/images/actions.py +++ /dev/null @@ -1,560 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Literal, overload - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import required_args, maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.images import action_create_params -from ...types.shared.action import Action -from ...types.images.action_list_response import ActionListResponse - -__all__ = ["ActionsResource", "AsyncActionsResource"] - - -class ActionsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ActionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return ActionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ActionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return ActionsResourceWithStreamingResponse(self) - - @overload - def create( - self, - image_id: int, - *, - type: Literal["convert", "transfer"], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Action: - """ - The following actions are available on an Image. - - ## Convert an Image to a Snapshot - - To convert an image, for example, a backup to a snapshot, send a POST request to - `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `convert`. - - ## Transfer an Image - - To transfer an image to another region, send a POST request to - `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `transfer` and set - `region` attribute to the slug identifier of the region you wish to transfer to. - - Args: - type: The action to be taken on the image. Can be either `convert` or `transfer`. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - def create( - self, - image_id: int, - *, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ], - type: Literal["convert", "transfer"], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Action: - """ - The following actions are available on an Image. - - ## Convert an Image to a Snapshot - - To convert an image, for example, a backup to a snapshot, send a POST request to - `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `convert`. 
- - ## Transfer an Image - - To transfer an image to another region, send a POST request to - `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `transfer` and set - `region` attribute to the slug identifier of the region you wish to transfer to. - - Args: - region: The slug identifier for the region where the resource will initially be - available. - - type: The action to be taken on the image. Can be either `convert` or `transfer`. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["type"], ["region", "type"]) - def create( - self, - image_id: int, - *, - type: Literal["convert", "transfer"], - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Action: - return self._post( - f"/v2/images/{image_id}/actions" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/images/{image_id}/actions", - body=maybe_transform( - { - "type": type, - "region": region, - }, - action_create_params.ActionCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Action, - ) - - def retrieve( - self, - action_id: int, - *, - image_id: int, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Action: - """ - To retrieve the status of an image action, send a GET request to - `/v2/images/$IMAGE_ID/actions/$IMAGE_ACTION_ID`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - f"/v2/images/{image_id}/actions/{action_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/images/{image_id}/actions/{action_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Action, - ) - - def list( - self, - image_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionListResponse: - """ - To retrieve all actions that have been executed on an image, send a GET request - to `/v2/images/$IMAGE_ID/actions`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - f"/v2/images/{image_id}/actions" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/images/{image_id}/actions", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ActionListResponse, - ) - - -class AsyncActionsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncActionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncActionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncActionsResourceWithStreamingResponse(self) - - @overload - async def create( - self, - image_id: int, - *, - type: Literal["convert", "transfer"], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Action: - """ - The following actions are available on an Image. - - ## Convert an Image to a Snapshot - - To convert an image, for example, a backup to a snapshot, send a POST request to - `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `convert`. - - ## Transfer an Image - - To transfer an image to another region, send a POST request to - `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `transfer` and set - `region` attribute to the slug identifier of the region you wish to transfer to. - - Args: - type: The action to be taken on the image. Can be either `convert` or `transfer`. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - async def create( - self, - image_id: int, - *, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ], - type: Literal["convert", "transfer"], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Action: - """ - The following actions are available on an Image. - - ## Convert an Image to a Snapshot - - To convert an image, for example, a backup to a snapshot, send a POST request to - `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `convert`. - - ## Transfer an Image - - To transfer an image to another region, send a POST request to - `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `transfer` and set - `region` attribute to the slug identifier of the region you wish to transfer to. - - Args: - region: The slug identifier for the region where the resource will initially be - available. - - type: The action to be taken on the image. Can be either `convert` or `transfer`. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["type"], ["region", "type"]) - async def create( - self, - image_id: int, - *, - type: Literal["convert", "transfer"], - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Action: - return await self._post( - f"/v2/images/{image_id}/actions" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/images/{image_id}/actions", - body=await async_maybe_transform( - { - "type": type, - "region": region, - }, - action_create_params.ActionCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Action, - ) - - async def retrieve( - self, - action_id: int, - *, - image_id: int, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Action: - """ - To retrieve the status of an image action, send a GET request to - `/v2/images/$IMAGE_ID/actions/$IMAGE_ACTION_ID`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - f"/v2/images/{image_id}/actions/{action_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/images/{image_id}/actions/{action_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=Action, - ) - - async def list( - self, - image_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionListResponse: - """ - To retrieve all actions that have been executed on an image, send a GET request - to `/v2/images/$IMAGE_ID/actions`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - f"/v2/images/{image_id}/actions" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/images/{image_id}/actions", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ActionListResponse, - ) - - -class ActionsResourceWithRawResponse: - def __init__(self, actions: ActionsResource) -> None: - self._actions = actions - - self.create = to_raw_response_wrapper( - actions.create, - ) - self.retrieve = to_raw_response_wrapper( - actions.retrieve, - ) - self.list = to_raw_response_wrapper( - actions.list, - ) - - -class AsyncActionsResourceWithRawResponse: - def __init__(self, actions: AsyncActionsResource) -> None: - self._actions = actions - - self.create = async_to_raw_response_wrapper( - actions.create, - ) - self.retrieve = async_to_raw_response_wrapper( - actions.retrieve, - ) - self.list = async_to_raw_response_wrapper( - actions.list, - ) - - -class ActionsResourceWithStreamingResponse: - def __init__(self, actions: ActionsResource) -> None: - self._actions = actions - - self.create = to_streamed_response_wrapper( - actions.create, - ) - self.retrieve = to_streamed_response_wrapper( - actions.retrieve, - ) - self.list = to_streamed_response_wrapper( - actions.list, - ) - - -class AsyncActionsResourceWithStreamingResponse: - def __init__(self, actions: AsyncActionsResource) -> None: - self._actions = actions - - self.create = async_to_streamed_response_wrapper( - actions.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - actions.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - actions.list, - ) diff --git 
a/src/gradientai/resources/images/images.py b/src/gradientai/resources/images/images.py deleted file mode 100644 index f75a1e73..00000000 --- a/src/gradientai/resources/images/images.py +++ /dev/null @@ -1,867 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union, Optional -from typing_extensions import Literal - -import httpx - -from ...types import image_list_params, image_create_params, image_update_params -from .actions import ( - ActionsResource, - AsyncActionsResource, - ActionsResourceWithRawResponse, - AsyncActionsResourceWithRawResponse, - ActionsResourceWithStreamingResponse, - AsyncActionsResourceWithStreamingResponse, -) -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.image_list_response import ImageListResponse -from ...types.image_create_response import ImageCreateResponse -from ...types.image_update_response import ImageUpdateResponse -from ...types.image_retrieve_response import ImageRetrieveResponse - -__all__ = ["ImagesResource", "AsyncImagesResource"] - - -class ImagesResource(SyncAPIResource): - @cached_property - def actions(self) -> ActionsResource: - return ActionsResource(self._client) - - @cached_property - def with_raw_response(self) -> ImagesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return ImagesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ImagesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return ImagesResourceWithStreamingResponse(self) - - def create( - self, - *, - description: str | NotGiven = NOT_GIVEN, - distribution: Literal[ - "Arch Linux", - "CentOS", - "CoreOS", - "Debian", - "Fedora", - "Fedora Atomic", - "FreeBSD", - "Gentoo", - "openSUSE", - "RancherOS", - "Rocky Linux", - "Ubuntu", - "Unknown", - ] - | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - url: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImageCreateResponse: - """To create a new custom image, send a POST request to /v2/images. - - The body must - contain a url attribute pointing to a Linux virtual machine image to be imported - into DigitalOcean. The image must be in the raw, qcow2, vhdx, vdi, or vmdk - format. It may be compressed using gzip or bzip2 and must be smaller than 100 GB - after being decompressed. 
- - Args: - description: An optional free-form text field to describe an image. - - distribution: The name of a custom image's distribution. Currently, the valid values are - `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, `Fedora`, `Fedora Atomic`, - `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, `Rocky Linux`, `Ubuntu`, and - `Unknown`. Any other value will be accepted but ignored, and `Unknown` will be - used in its place. - - name: The display name that has been given to an image. This is what is shown in the - control panel and is generally a descriptive title for the image in question. - - region: The slug identifier for the region where the resource will initially be - available. - - tags: A flat array of tag names as strings to be applied to the resource. Tag names - may be for either existing or new tags. - - Requires `tag:create` scope. - - url: A URL from which the custom Linux virtual machine image may be retrieved. The - image it points to must be in the raw, qcow2, vhdx, vdi, or vmdk format. It may - be compressed using gzip or bzip2 and must be smaller than 100 GB after being - decompressed. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._post( - "/v2/images" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/images", - body=maybe_transform( - { - "description": description, - "distribution": distribution, - "name": name, - "region": region, - "tags": tags, - "url": url, - }, - image_create_params.ImageCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImageCreateResponse, - ) - - def retrieve( - self, - image_id: Union[int, str], - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImageRetrieveResponse: - """ - To retrieve information about an image, send a `GET` request to - `/v2/images/$IDENTIFIER`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - f"/v2/images/{image_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/images/{image_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImageRetrieveResponse, - ) - - def update( - self, - image_id: int, - *, - description: str | NotGiven = NOT_GIVEN, - distribution: Literal[ - "Arch Linux", - "CentOS", - "CoreOS", - "Debian", - "Fedora", - "Fedora Atomic", - "FreeBSD", - "Gentoo", - "openSUSE", - "RancherOS", - "Rocky Linux", - "Ubuntu", - "Unknown", - ] - | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImageUpdateResponse: - """To update an image, send a `PUT` request to `/v2/images/$IMAGE_ID`. - - Set the - `name` attribute to the new value you would like to use. For custom images, the - `description` and `distribution` attributes may also be updated. - - Args: - description: An optional free-form text field to describe an image. - - distribution: The name of a custom image's distribution. Currently, the valid values are - `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, `Fedora`, `Fedora Atomic`, - `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, `Rocky Linux`, `Ubuntu`, and - `Unknown`. 
Any other value will be accepted but ignored, and `Unknown` will be - used in its place. - - name: The display name that has been given to an image. This is what is shown in the - control panel and is generally a descriptive title for the image in question. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._put( - f"/v2/images/{image_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/images/{image_id}", - body=maybe_transform( - { - "description": description, - "distribution": distribution, - "name": name, - }, - image_update_params.ImageUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImageUpdateResponse, - ) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - private: bool | NotGiven = NOT_GIVEN, - tag_name: str | NotGiven = NOT_GIVEN, - type: Literal["application", "distribution"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImageListResponse: - """ - To list all of the images available on your account, send a GET request to - /v2/images. - - ## Filtering Results - - --- - - It's possible to request filtered results by including certain query parameters. 
- - **Image Type** - - Either 1-Click Application or OS Distribution images can be filtered by using - the `type` query parameter. - - > Important: The `type` query parameter does not directly relate to the `type` - > attribute. - - To retrieve only **_distribution_** images, include the `type` query parameter - set to distribution, `/v2/images?type=distribution`. - - To retrieve only **_application_** images, include the `type` query parameter - set to application, `/v2/images?type=application`. - - **User Images** - - To retrieve only the private images of a user, include the `private` query - parameter set to true, `/v2/images?private=true`. - - **Tags** - - To list all images assigned to a specific tag, include the `tag_name` query - parameter set to the name of the tag in your GET request. For example, - `/v2/images?tag_name=$TAG_NAME`. - - Args: - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - private: Used to filter only user images. - - tag_name: Used to filter images by a specific tag. - - type: Filters results based on image type which can be either `application` or - `distribution`. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/images" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/images", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - "private": private, - "tag_name": tag_name, - "type": type, - }, - image_list_params.ImageListParams, - ), - ), - cast_to=ImageListResponse, - ) - - def delete( - self, - image_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete a snapshot or custom image, send a `DELETE` request to - `/v2/images/$IMAGE_ID`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/v2/images/{image_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/images/{image_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class AsyncImagesResource(AsyncAPIResource): - @cached_property - def actions(self) -> AsyncActionsResource: - return AsyncActionsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncImagesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncImagesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncImagesResourceWithStreamingResponse(self) - - async def create( - self, - *, - description: str | NotGiven = NOT_GIVEN, - distribution: Literal[ - "Arch Linux", - "CentOS", - "CoreOS", - "Debian", - "Fedora", - "Fedora Atomic", - "FreeBSD", - "Gentoo", - "openSUSE", - "RancherOS", - "Rocky Linux", - "Ubuntu", - "Unknown", - ] - | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - url: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImageCreateResponse: - """To create a new custom image, send a POST request to /v2/images. - - The body must - contain a url attribute pointing to a Linux virtual machine image to be imported - into DigitalOcean. The image must be in the raw, qcow2, vhdx, vdi, or vmdk - format. It may be compressed using gzip or bzip2 and must be smaller than 100 GB - after being decompressed. - - Args: - description: An optional free-form text field to describe an image. - - distribution: The name of a custom image's distribution. Currently, the valid values are - `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, `Fedora`, `Fedora Atomic`, - `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, `Rocky Linux`, `Ubuntu`, and - `Unknown`. 
Any other value will be accepted but ignored, and `Unknown` will be - used in its place. - - name: The display name that has been given to an image. This is what is shown in the - control panel and is generally a descriptive title for the image in question. - - region: The slug identifier for the region where the resource will initially be - available. - - tags: A flat array of tag names as strings to be applied to the resource. Tag names - may be for either existing or new tags. - - Requires `tag:create` scope. - - url: A URL from which the custom Linux virtual machine image may be retrieved. The - image it points to must be in the raw, qcow2, vhdx, vdi, or vmdk format. It may - be compressed using gzip or bzip2 and must be smaller than 100 GB after being - decompressed. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._post( - "/v2/images" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/images", - body=await async_maybe_transform( - { - "description": description, - "distribution": distribution, - "name": name, - "region": region, - "tags": tags, - "url": url, - }, - image_create_params.ImageCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImageCreateResponse, - ) - - async def retrieve( - self, - image_id: Union[int, str], - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImageRetrieveResponse: - """ - To retrieve information about an image, send a `GET` request to - `/v2/images/$IDENTIFIER`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - f"/v2/images/{image_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/images/{image_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImageRetrieveResponse, - ) - - async def update( - self, - image_id: int, - *, - description: str | NotGiven = NOT_GIVEN, - distribution: Literal[ - "Arch Linux", - "CentOS", - "CoreOS", - "Debian", - "Fedora", - "Fedora Atomic", - "FreeBSD", - "Gentoo", - "openSUSE", - "RancherOS", - "Rocky Linux", - "Ubuntu", - "Unknown", - ] - | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImageUpdateResponse: - """To update an image, send a `PUT` request to `/v2/images/$IMAGE_ID`. - - Set the - `name` attribute to the new value you would like to use. For custom images, the - `description` and `distribution` attributes may also be updated. - - Args: - description: An optional free-form text field to describe an image. 
- - distribution: The name of a custom image's distribution. Currently, the valid values are - `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, `Fedora`, `Fedora Atomic`, - `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, `Rocky Linux`, `Ubuntu`, and - `Unknown`. Any other value will be accepted but ignored, and `Unknown` will be - used in its place. - - name: The display name that has been given to an image. This is what is shown in the - control panel and is generally a descriptive title for the image in question. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._put( - f"/v2/images/{image_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/images/{image_id}", - body=await async_maybe_transform( - { - "description": description, - "distribution": distribution, - "name": name, - }, - image_update_params.ImageUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImageUpdateResponse, - ) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - private: bool | NotGiven = NOT_GIVEN, - tag_name: str | NotGiven = NOT_GIVEN, - type: Literal["application", "distribution"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImageListResponse: - """ - To list all of the images available on your account, send a GET request to - /v2/images. - - ## Filtering Results - - --- - - It's possible to request filtered results by including certain query parameters. - - **Image Type** - - Either 1-Click Application or OS Distribution images can be filtered by using - the `type` query parameter. - - > Important: The `type` query parameter does not directly relate to the `type` - > attribute. - - To retrieve only **_distribution_** images, include the `type` query parameter - set to distribution, `/v2/images?type=distribution`. - - To retrieve only **_application_** images, include the `type` query parameter - set to application, `/v2/images?type=application`. - - **User Images** - - To retrieve only the private images of a user, include the `private` query - parameter set to true, `/v2/images?private=true`. - - **Tags** - - To list all images assigned to a specific tag, include the `tag_name` query - parameter set to the name of the tag in your GET request. For example, - `/v2/images?tag_name=$TAG_NAME`. - - Args: - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - private: Used to filter only user images. - - tag_name: Used to filter images by a specific tag. - - type: Filters results based on image type which can be either `application` or - `distribution`. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/images" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/images", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - "private": private, - "tag_name": tag_name, - "type": type, - }, - image_list_params.ImageListParams, - ), - ), - cast_to=ImageListResponse, - ) - - async def delete( - self, - image_id: int, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete a snapshot or custom image, send a `DELETE` request to - `/v2/images/$IMAGE_ID`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/v2/images/{image_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/images/{image_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class ImagesResourceWithRawResponse: - def __init__(self, images: ImagesResource) -> None: - self._images = images - - self.create = to_raw_response_wrapper( - images.create, - ) - self.retrieve = to_raw_response_wrapper( - images.retrieve, - ) - self.update = to_raw_response_wrapper( - images.update, - ) - self.list = to_raw_response_wrapper( - images.list, - ) - self.delete = to_raw_response_wrapper( - images.delete, - ) - - @cached_property - def actions(self) -> ActionsResourceWithRawResponse: - return ActionsResourceWithRawResponse(self._images.actions) - - -class AsyncImagesResourceWithRawResponse: - def __init__(self, images: AsyncImagesResource) -> None: - self._images = images - - self.create = async_to_raw_response_wrapper( - images.create, - ) - self.retrieve = async_to_raw_response_wrapper( - images.retrieve, - ) - self.update = async_to_raw_response_wrapper( - images.update, - ) - self.list = async_to_raw_response_wrapper( - images.list, - ) - self.delete = async_to_raw_response_wrapper( - images.delete, - ) - - @cached_property - def actions(self) -> AsyncActionsResourceWithRawResponse: - return AsyncActionsResourceWithRawResponse(self._images.actions) - - -class ImagesResourceWithStreamingResponse: - def __init__(self, images: ImagesResource) -> None: - self._images = images - - self.create = to_streamed_response_wrapper( 
- images.create, - ) - self.retrieve = to_streamed_response_wrapper( - images.retrieve, - ) - self.update = to_streamed_response_wrapper( - images.update, - ) - self.list = to_streamed_response_wrapper( - images.list, - ) - self.delete = to_streamed_response_wrapper( - images.delete, - ) - - @cached_property - def actions(self) -> ActionsResourceWithStreamingResponse: - return ActionsResourceWithStreamingResponse(self._images.actions) - - -class AsyncImagesResourceWithStreamingResponse: - def __init__(self, images: AsyncImagesResource) -> None: - self._images = images - - self.create = async_to_streamed_response_wrapper( - images.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - images.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - images.update, - ) - self.list = async_to_streamed_response_wrapper( - images.list, - ) - self.delete = async_to_streamed_response_wrapper( - images.delete, - ) - - @cached_property - def actions(self) -> AsyncActionsResourceWithStreamingResponse: - return AsyncActionsResourceWithStreamingResponse(self._images.actions) diff --git a/src/gradientai/resources/inference/api_keys.py b/src/gradientai/resources/inference/api_keys.py index 238ef6f6..6759d09c 100644 --- a/src/gradientai/resources/inference/api_keys.py +++ b/src/gradientai/resources/inference/api_keys.py @@ -60,8 +60,6 @@ def create( To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. Args: - name: A human friendly name to identify the key - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -99,10 +97,6 @@ def update( `/v2/gen-ai/models/api_keys/{api_key_uuid}`. Args: - body_api_key_uuid: API key ID - - name: Name - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -146,9 +140,9 @@ def list( To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. Args: - page: Page number. + page: page number. 
- per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers @@ -286,8 +280,6 @@ async def create( To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. Args: - name: A human friendly name to identify the key - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -325,10 +317,6 @@ async def update( `/v2/gen-ai/models/api_keys/{api_key_uuid}`. Args: - body_api_key_uuid: API key ID - - name: Name - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -372,9 +360,9 @@ async def list( To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/gradientai/resources/knowledge_bases/data_sources.py index 8357dfda..e05696b9 100644 --- a/src/gradientai/resources/knowledge_bases/data_sources.py +++ b/src/gradientai/resources/knowledge_bases/data_sources.py @@ -69,14 +69,6 @@ def create( `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. Args: - aws_data_source: AWS S3 Data Source - - body_knowledge_base_uuid: Knowledge base id - - spaces_data_source: Spaces Bucket Data Source - - web_crawler_data_source: WebCrawlerDataSource - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -126,9 +118,9 @@ def list( `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers @@ -244,14 +236,6 @@ async def create( `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. 
Args: - aws_data_source: AWS S3 Data Source - - body_knowledge_base_uuid: Knowledge base id - - spaces_data_source: Spaces Bucket Data Source - - web_crawler_data_source: WebCrawlerDataSource - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -301,9 +285,9 @@ async def list( `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers diff --git a/src/gradientai/resources/knowledge_bases/indexing_jobs.py b/src/gradientai/resources/knowledge_bases/indexing_jobs.py index 891acd0b..39151e41 100644 --- a/src/gradientai/resources/knowledge_bases/indexing_jobs.py +++ b/src/gradientai/resources/knowledge_bases/indexing_jobs.py @@ -68,11 +68,6 @@ def create( `/v2/gen-ai/indexing_jobs`. Args: - data_source_uuids: List of data source ids to index, if none are provided, all data sources will be - indexed - - knowledge_base_uuid: Knowledge base id - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -151,9 +146,9 @@ def list( `/v2/gen-ai/indexing_jobs`. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers @@ -299,11 +294,6 @@ async def create( `/v2/gen-ai/indexing_jobs`. Args: - data_source_uuids: List of data source ids to index, if none are provided, all data sources will be - indexed - - knowledge_base_uuid: Knowledge base id - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -382,9 +372,9 @@ async def list( `/v2/gen-ai/indexing_jobs`. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. 
extra_headers: Send extra headers diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py index c181295c..28acdd7f 100644 --- a/src/gradientai/resources/knowledge_bases/knowledge_bases.py +++ b/src/gradientai/resources/knowledge_bases/knowledge_bases.py @@ -112,8 +112,6 @@ def create( tags: Tags to organize your knowledge base. - vpc_uuid: The VPC to deploy the knowledge base database in - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -203,18 +201,12 @@ def update( `/v2/gen-ai/knowledge_bases/{uuid}`. Args: - database_id: The id of the DigitalOcean database this knowledge base will use, optiona. + database_id: the id of the DigitalOcean database this knowledge base will use, optiona. embedding_model_uuid: Identifier for the foundation model. - name: Knowledge base name - - project_id: The id of the DigitalOcean project this knowledge base will belong to - tags: Tags to organize your knowledge base. - body_uuid: Knowledge base id - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -262,9 +254,9 @@ def list( To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers @@ -400,8 +392,6 @@ async def create( tags: Tags to organize your knowledge base. - vpc_uuid: The VPC to deploy the knowledge base database in - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -491,18 +481,12 @@ async def update( `/v2/gen-ai/knowledge_bases/{uuid}`. Args: - database_id: The id of the DigitalOcean database this knowledge base will use, optiona. + database_id: the id of the DigitalOcean database this knowledge base will use, optiona. embedding_model_uuid: Identifier for the foundation model. 
- name: Knowledge base name - - project_id: The id of the DigitalOcean project this knowledge base will belong to - tags: Tags to organize your knowledge base. - body_uuid: Knowledge base id - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -550,9 +534,9 @@ async def list( To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers diff --git a/src/gradientai/resources/load_balancers/__init__.py b/src/gradientai/resources/load_balancers/__init__.py deleted file mode 100644 index 2cede1c8..00000000 --- a/src/gradientai/resources/load_balancers/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .droplets import ( - DropletsResource, - AsyncDropletsResource, - DropletsResourceWithRawResponse, - AsyncDropletsResourceWithRawResponse, - DropletsResourceWithStreamingResponse, - AsyncDropletsResourceWithStreamingResponse, -) -from .load_balancers import ( - LoadBalancersResource, - AsyncLoadBalancersResource, - LoadBalancersResourceWithRawResponse, - AsyncLoadBalancersResourceWithRawResponse, - LoadBalancersResourceWithStreamingResponse, - AsyncLoadBalancersResourceWithStreamingResponse, -) -from .forwarding_rules import ( - ForwardingRulesResource, - AsyncForwardingRulesResource, - ForwardingRulesResourceWithRawResponse, - AsyncForwardingRulesResourceWithRawResponse, - ForwardingRulesResourceWithStreamingResponse, - AsyncForwardingRulesResourceWithStreamingResponse, -) - -__all__ = [ - "DropletsResource", - "AsyncDropletsResource", - "DropletsResourceWithRawResponse", - "AsyncDropletsResourceWithRawResponse", - "DropletsResourceWithStreamingResponse", - "AsyncDropletsResourceWithStreamingResponse", - "ForwardingRulesResource", - "AsyncForwardingRulesResource", - 
"ForwardingRulesResourceWithRawResponse", - "AsyncForwardingRulesResourceWithRawResponse", - "ForwardingRulesResourceWithStreamingResponse", - "AsyncForwardingRulesResourceWithStreamingResponse", - "LoadBalancersResource", - "AsyncLoadBalancersResource", - "LoadBalancersResourceWithRawResponse", - "AsyncLoadBalancersResourceWithRawResponse", - "LoadBalancersResourceWithStreamingResponse", - "AsyncLoadBalancersResourceWithStreamingResponse", -] diff --git a/src/gradientai/resources/load_balancers/droplets.py b/src/gradientai/resources/load_balancers/droplets.py deleted file mode 100644 index 4eb0ed60..00000000 --- a/src/gradientai/resources/load_balancers/droplets.py +++ /dev/null @@ -1,302 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Iterable - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.load_balancers import droplet_add_params, droplet_remove_params - -__all__ = ["DropletsResource", "AsyncDropletsResource"] - - -class DropletsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> DropletsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return DropletsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> DropletsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return DropletsResourceWithStreamingResponse(self) - - def add( - self, - lb_id: str, - *, - droplet_ids: Iterable[int], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To assign a Droplet to a load balancer instance, send a POST request to - `/v2/load_balancers/$LOAD_BALANCER_ID/droplets`. In the body of the request, - there should be a `droplet_ids` attribute containing a list of Droplet IDs. - Individual Droplets can not be added to a load balancer configured with a - Droplet tag. Attempting to do so will result in a "422 Unprocessable Entity" - response from the API. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not lb_id: - raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._post( - f"/v2/load_balancers/{lb_id}/droplets" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/droplets", - body=maybe_transform({"droplet_ids": droplet_ids}, droplet_add_params.DropletAddParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - def remove( - self, - lb_id: str, - *, - droplet_ids: Iterable[int], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To remove a Droplet from a load balancer instance, send a DELETE request to - `/v2/load_balancers/$LOAD_BALANCER_ID/droplets`. In the body of the request, - there should be a `droplet_ids` attribute containing a list of Droplet IDs. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not lb_id: - raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/v2/load_balancers/{lb_id}/droplets" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/droplets", - body=maybe_transform({"droplet_ids": droplet_ids}, droplet_remove_params.DropletRemoveParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class AsyncDropletsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncDropletsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncDropletsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncDropletsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncDropletsResourceWithStreamingResponse(self) - - async def add( - self, - lb_id: str, - *, - droplet_ids: Iterable[int], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To assign a Droplet to a load balancer instance, send a POST request to - `/v2/load_balancers/$LOAD_BALANCER_ID/droplets`. In the body of the request, - there should be a `droplet_ids` attribute containing a list of Droplet IDs. - Individual Droplets can not be added to a load balancer configured with a - Droplet tag. Attempting to do so will result in a "422 Unprocessable Entity" - response from the API. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not lb_id: - raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._post( - f"/v2/load_balancers/{lb_id}/droplets" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/droplets", - body=await async_maybe_transform({"droplet_ids": droplet_ids}, droplet_add_params.DropletAddParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - async def remove( - self, - lb_id: str, - *, - droplet_ids: Iterable[int], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To remove a Droplet from a load balancer instance, send a DELETE request to - `/v2/load_balancers/$LOAD_BALANCER_ID/droplets`. In the body of the request, - there should be a `droplet_ids` attribute containing a list of Droplet IDs. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not lb_id: - raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/v2/load_balancers/{lb_id}/droplets" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/droplets", - body=await async_maybe_transform({"droplet_ids": droplet_ids}, droplet_remove_params.DropletRemoveParams), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class DropletsResourceWithRawResponse: - def __init__(self, droplets: DropletsResource) -> None: - self._droplets = droplets - - self.add = to_raw_response_wrapper( - droplets.add, - ) - self.remove = to_raw_response_wrapper( - droplets.remove, - ) - - -class 
AsyncDropletsResourceWithRawResponse: - def __init__(self, droplets: AsyncDropletsResource) -> None: - self._droplets = droplets - - self.add = async_to_raw_response_wrapper( - droplets.add, - ) - self.remove = async_to_raw_response_wrapper( - droplets.remove, - ) - - -class DropletsResourceWithStreamingResponse: - def __init__(self, droplets: DropletsResource) -> None: - self._droplets = droplets - - self.add = to_streamed_response_wrapper( - droplets.add, - ) - self.remove = to_streamed_response_wrapper( - droplets.remove, - ) - - -class AsyncDropletsResourceWithStreamingResponse: - def __init__(self, droplets: AsyncDropletsResource) -> None: - self._droplets = droplets - - self.add = async_to_streamed_response_wrapper( - droplets.add, - ) - self.remove = async_to_streamed_response_wrapper( - droplets.remove, - ) diff --git a/src/gradientai/resources/load_balancers/forwarding_rules.py b/src/gradientai/resources/load_balancers/forwarding_rules.py deleted file mode 100644 index 6e9757c5..00000000 --- a/src/gradientai/resources/load_balancers/forwarding_rules.py +++ /dev/null @@ -1,301 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Iterable - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.load_balancers import forwarding_rule_add_params, forwarding_rule_remove_params -from ...types.forwarding_rule_param import ForwardingRuleParam - -__all__ = ["ForwardingRulesResource", "AsyncForwardingRulesResource"] - - -class ForwardingRulesResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ForwardingRulesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return ForwardingRulesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ForwardingRulesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return ForwardingRulesResourceWithStreamingResponse(self) - - def add( - self, - lb_id: str, - *, - forwarding_rules: Iterable[ForwardingRuleParam], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To add an additional forwarding rule to a load balancer instance, send a POST - request to `/v2/load_balancers/$LOAD_BALANCER_ID/forwarding_rules`. In the body - of the request, there should be a `forwarding_rules` attribute containing an - array of rules to be added. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not lb_id: - raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._post( - f"/v2/load_balancers/{lb_id}/forwarding_rules" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/forwarding_rules", - body=maybe_transform( - {"forwarding_rules": forwarding_rules}, forwarding_rule_add_params.ForwardingRuleAddParams - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - def remove( - self, - lb_id: str, - *, - forwarding_rules: Iterable[ForwardingRuleParam], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To remove forwarding rules from a load balancer instance, send a DELETE request - to `/v2/load_balancers/$LOAD_BALANCER_ID/forwarding_rules`. In the body of the - request, there should be a `forwarding_rules` attribute containing an array of - rules to be removed. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not lb_id: - raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/v2/load_balancers/{lb_id}/forwarding_rules" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/forwarding_rules", - body=maybe_transform( - {"forwarding_rules": forwarding_rules}, forwarding_rule_remove_params.ForwardingRuleRemoveParams - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class AsyncForwardingRulesResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncForwardingRulesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncForwardingRulesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncForwardingRulesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncForwardingRulesResourceWithStreamingResponse(self) - - async def add( - self, - lb_id: str, - *, - forwarding_rules: Iterable[ForwardingRuleParam], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To add an additional forwarding rule to a load balancer instance, send a POST - request to `/v2/load_balancers/$LOAD_BALANCER_ID/forwarding_rules`. In the body - of the request, there should be a `forwarding_rules` attribute containing an - array of rules to be added. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not lb_id: - raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._post( - f"/v2/load_balancers/{lb_id}/forwarding_rules" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/forwarding_rules", - body=await async_maybe_transform( - {"forwarding_rules": forwarding_rules}, forwarding_rule_add_params.ForwardingRuleAddParams - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - async def remove( - self, - lb_id: str, - *, - forwarding_rules: Iterable[ForwardingRuleParam], - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To remove forwarding rules from a load balancer instance, send a DELETE request - to `/v2/load_balancers/$LOAD_BALANCER_ID/forwarding_rules`. In the body of the - request, there should be a `forwarding_rules` attribute containing an array of - rules to be removed. - - No response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not lb_id: - raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/v2/load_balancers/{lb_id}/forwarding_rules" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/forwarding_rules", - body=await async_maybe_transform( - {"forwarding_rules": forwarding_rules}, forwarding_rule_remove_params.ForwardingRuleRemoveParams - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class ForwardingRulesResourceWithRawResponse: - def __init__(self, forwarding_rules: ForwardingRulesResource) -> None: - self._forwarding_rules = forwarding_rules - - self.add = to_raw_response_wrapper( - forwarding_rules.add, - ) - self.remove = to_raw_response_wrapper( - forwarding_rules.remove, - ) - - -class AsyncForwardingRulesResourceWithRawResponse: - def __init__(self, forwarding_rules: AsyncForwardingRulesResource) -> None: - self._forwarding_rules = forwarding_rules - - self.add = async_to_raw_response_wrapper( - forwarding_rules.add, - ) - self.remove = async_to_raw_response_wrapper( - forwarding_rules.remove, - ) - - -class ForwardingRulesResourceWithStreamingResponse: - def __init__(self, forwarding_rules: ForwardingRulesResource) -> None: - self._forwarding_rules = forwarding_rules - - self.add = to_streamed_response_wrapper( - forwarding_rules.add, - ) - self.remove = to_streamed_response_wrapper( - forwarding_rules.remove, - ) - - -class AsyncForwardingRulesResourceWithStreamingResponse: - def __init__(self, forwarding_rules: 
AsyncForwardingRulesResource) -> None: - self._forwarding_rules = forwarding_rules - - self.add = async_to_streamed_response_wrapper( - forwarding_rules.add, - ) - self.remove = async_to_streamed_response_wrapper( - forwarding_rules.remove, - ) diff --git a/src/gradientai/resources/load_balancers/load_balancers.py b/src/gradientai/resources/load_balancers/load_balancers.py deleted file mode 100644 index 12b9097c..00000000 --- a/src/gradientai/resources/load_balancers/load_balancers.py +++ /dev/null @@ -1,2205 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Iterable -from typing_extensions import Literal, overload - -import httpx - -from ...types import ( - load_balancer_list_params, - load_balancer_create_params, - load_balancer_update_params, -) -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from ..._utils import required_args, maybe_transform, async_maybe_transform -from .droplets import ( - DropletsResource, - AsyncDropletsResource, - DropletsResourceWithRawResponse, - AsyncDropletsResourceWithRawResponse, - DropletsResourceWithStreamingResponse, - AsyncDropletsResourceWithStreamingResponse, -) -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from .forwarding_rules import ( - ForwardingRulesResource, - AsyncForwardingRulesResource, - ForwardingRulesResourceWithRawResponse, - AsyncForwardingRulesResourceWithRawResponse, - ForwardingRulesResourceWithStreamingResponse, - AsyncForwardingRulesResourceWithStreamingResponse, -) -from ...types.domains_param import DomainsParam -from ...types.lb_firewall_param import LbFirewallParam -from ...types.glb_settings_param import 
GlbSettingsParam -from ...types.health_check_param import HealthCheckParam -from ...types.forwarding_rule_param import ForwardingRuleParam -from ...types.sticky_sessions_param import StickySessionsParam -from ...types.load_balancer_list_response import LoadBalancerListResponse -from ...types.load_balancer_create_response import LoadBalancerCreateResponse -from ...types.load_balancer_update_response import LoadBalancerUpdateResponse -from ...types.load_balancer_retrieve_response import LoadBalancerRetrieveResponse - -__all__ = ["LoadBalancersResource", "AsyncLoadBalancersResource"] - - -class LoadBalancersResource(SyncAPIResource): - @cached_property - def droplets(self) -> DropletsResource: - return DropletsResource(self._client) - - @cached_property - def forwarding_rules(self) -> ForwardingRulesResource: - return ForwardingRulesResource(self._client) - - @cached_property - def with_raw_response(self) -> LoadBalancersResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return LoadBalancersResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> LoadBalancersResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return LoadBalancersResourceWithStreamingResponse(self) - - @overload - def create( - self, - *, - forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> LoadBalancerCreateResponse: - """ - To create a new load balancer instance, send a POST request to - `/v2/load_balancers`. - - You can specify the Droplets that will sit behind the load balancer using one of - two methods: - - - Set `droplet_ids` to a list of specific Droplet IDs. - - Set `tag` to the name of a tag. All Droplets with this tag applied will be - assigned to the load balancer. Additional Droplets will be automatically - assigned as they are tagged. - - These methods are mutually exclusive. - - Args: - forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. - - algorithm: This field has been deprecated. You can no longer specify an algorithm for load - balancers. - - disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for - Let's Encrypt certificates that are added to the load balancer. - - domains: An array of objects specifying the domain configurations for a Global load - balancer. - - droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. - - enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to - target Droplets. - - enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. - - firewall: An object specifying allow and deny rules to control traffic to the load - balancer. - - glb_settings: An object specifying forwarding configurations for a Global load balancer. - - health_check: An object specifying health check settings for the load balancer. - - http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the - target droplets. - - name: A human-readable name for a load balancer instance. 
- - network: A string indicating whether the load balancer should be external or internal. - Internal load balancers have no public IPs and are only accessible to resources - on the same VPC network. This property cannot be updated after creating the load - balancer. - - network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and - IPv6 networking. This property cannot be updated after creating the load - balancer. - - project_id: The ID of the project that the load balancer is associated with. If no ID is - provided at creation, the load balancer associates with the user's default - project. If an invalid project ID is provided, the load balancer will not be - created. - - redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 - will be redirected to HTTPS on port 443. - - region: The slug identifier for the region where the resource will initially be - available. - - size: This field has been replaced by the `size_unit` field for all regions except in - AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load - balancer having a set number of nodes. - - - `lb-small` = 1 node - - `lb-medium` = 3 nodes - - `lb-large` = 6 nodes - - You can resize load balancers after creation up to once per hour. You cannot - resize a load balancer within the first hour of its creation. - - size_unit: How many nodes the load balancer contains. Each additional node increases the - load balancer's ability to manage more connections. Load balancers can be scaled - up or down, and you can change the number of nodes after creation up to once per - hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. - Use the `size` field to scale load balancers that reside in these regions. - - sticky_sessions: An object specifying sticky sessions settings for the load balancer. 
- - target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as - target backends for a Global load balancer. - - tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load - balancer. The possible values are `DEFAULT` or `STRONG`. The default value is - `DEFAULT`. - - type: A string indicating whether the load balancer should be a standard regional HTTP - load balancer, a regional network load balancer that routes traffic at the - TCP/UDP transport layer, or a global load balancer. - - vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - def create( - self, - *, - forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | 
NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - tag: str | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> LoadBalancerCreateResponse: - """ - To create a new load balancer instance, send a POST request to - `/v2/load_balancers`. - - You can specify the Droplets that will sit behind the load balancer using one of - two methods: - - - Set `droplet_ids` to a list of specific Droplet IDs. - - Set `tag` to the name of a tag. All Droplets with this tag applied will be - assigned to the load balancer. Additional Droplets will be automatically - assigned as they are tagged. - - These methods are mutually exclusive. - - Args: - forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. - - algorithm: This field has been deprecated. You can no longer specify an algorithm for load - balancers. - - disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for - Let's Encrypt certificates that are added to the load balancer. - - domains: An array of objects specifying the domain configurations for a Global load - balancer. 
- - enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to - target Droplets. - - enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. - - firewall: An object specifying allow and deny rules to control traffic to the load - balancer. - - glb_settings: An object specifying forwarding configurations for a Global load balancer. - - health_check: An object specifying health check settings for the load balancer. - - http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the - target droplets. - - name: A human-readable name for a load balancer instance. - - network: A string indicating whether the load balancer should be external or internal. - Internal load balancers have no public IPs and are only accessible to resources - on the same VPC network. This property cannot be updated after creating the load - balancer. - - network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and - IPv6 networking. This property cannot be updated after creating the load - balancer. - - project_id: The ID of the project that the load balancer is associated with. If no ID is - provided at creation, the load balancer associates with the user's default - project. If an invalid project ID is provided, the load balancer will not be - created. - - redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 - will be redirected to HTTPS on port 443. - - region: The slug identifier for the region where the resource will initially be - available. - - size: This field has been replaced by the `size_unit` field for all regions except in - AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load - balancer having a set number of nodes. 
- - - `lb-small` = 1 node - - `lb-medium` = 3 nodes - - `lb-large` = 6 nodes - - You can resize load balancers after creation up to once per hour. You cannot - resize a load balancer within the first hour of its creation. - - size_unit: How many nodes the load balancer contains. Each additional node increases the - load balancer's ability to manage more connections. Load balancers can be scaled - up or down, and you can change the number of nodes after creation up to once per - hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. - Use the `size` field to scale load balancers that reside in these regions. - - sticky_sessions: An object specifying sticky sessions settings for the load balancer. - - tag: The name of a Droplet tag corresponding to Droplets assigned to the load - balancer. - - target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as - target backends for a Global load balancer. - - tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load - balancer. The possible values are `DEFAULT` or `STRONG`. The default value is - `DEFAULT`. - - type: A string indicating whether the load balancer should be a standard regional HTTP - load balancer, a regional network load balancer that routes traffic at the - TCP/UDP transport layer, or a global load balancer. - - vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @required_args(["forwarding_rules"]) - def create( - self, - *, - forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - tag: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> LoadBalancerCreateResponse: - return self._post( - "/v2/load_balancers" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/load_balancers", - body=maybe_transform( - { - "forwarding_rules": forwarding_rules, - "algorithm": algorithm, - "disable_lets_encrypt_dns_records": disable_lets_encrypt_dns_records, - "domains": domains, - "droplet_ids": droplet_ids, - "enable_backend_keepalive": enable_backend_keepalive, - "enable_proxy_protocol": enable_proxy_protocol, - "firewall": firewall, - "glb_settings": glb_settings, - "health_check": health_check, - "http_idle_timeout_seconds": http_idle_timeout_seconds, - "name": name, - "network": network, - "network_stack": network_stack, - "project_id": project_id, - "redirect_http_to_https": redirect_http_to_https, - "region": region, - "size": size, - "size_unit": size_unit, - "sticky_sessions": sticky_sessions, - "target_load_balancer_ids": target_load_balancer_ids, - "tls_cipher_policy": tls_cipher_policy, - "type": type, - "vpc_uuid": vpc_uuid, - "tag": tag, - }, - load_balancer_create_params.LoadBalancerCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=LoadBalancerCreateResponse, - ) - - def retrieve( - self, - lb_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> LoadBalancerRetrieveResponse: - """ - To show information about a load balancer instance, send a GET request to - `/v2/load_balancers/$LOAD_BALANCER_ID`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not lb_id: - raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") - return self._get( - f"/v2/load_balancers/{lb_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=LoadBalancerRetrieveResponse, - ) - - @overload - def update( - self, - lb_id: str, - *, - forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, - 
region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> LoadBalancerUpdateResponse: - """ - To update a load balancer's settings, send a PUT request to - `/v2/load_balancers/$LOAD_BALANCER_ID`. The request should contain a full - representation of the load balancer including existing attributes. It may - contain _one of_ the `droplets_ids` or `tag` attributes as they are mutually - exclusive. **Note that any attribute that is not provided will be reset to its - default value.** - - Args: - forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. - - algorithm: This field has been deprecated. You can no longer specify an algorithm for load - balancers. - - disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for - Let's Encrypt certificates that are added to the load balancer. - - domains: An array of objects specifying the domain configurations for a Global load - balancer. 
- - droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. - - enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to - target Droplets. - - enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. - - firewall: An object specifying allow and deny rules to control traffic to the load - balancer. - - glb_settings: An object specifying forwarding configurations for a Global load balancer. - - health_check: An object specifying health check settings for the load balancer. - - http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the - target droplets. - - name: A human-readable name for a load balancer instance. - - network: A string indicating whether the load balancer should be external or internal. - Internal load balancers have no public IPs and are only accessible to resources - on the same VPC network. This property cannot be updated after creating the load - balancer. - - network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and - IPv6 networking. This property cannot be updated after creating the load - balancer. - - project_id: The ID of the project that the load balancer is associated with. If no ID is - provided at creation, the load balancer associates with the user's default - project. If an invalid project ID is provided, the load balancer will not be - created. - - redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 - will be redirected to HTTPS on port 443. - - region: The slug identifier for the region where the resource will initially be - available. - - size: This field has been replaced by the `size_unit` field for all regions except in - AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load - balancer having a set number of nodes. 
- - - `lb-small` = 1 node - - `lb-medium` = 3 nodes - - `lb-large` = 6 nodes - - You can resize load balancers after creation up to once per hour. You cannot - resize a load balancer within the first hour of its creation. - - size_unit: How many nodes the load balancer contains. Each additional node increases the - load balancer's ability to manage more connections. Load balancers can be scaled - up or down, and you can change the number of nodes after creation up to once per - hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. - Use the `size` field to scale load balancers that reside in these regions. - - sticky_sessions: An object specifying sticky sessions settings for the load balancer. - - target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as - target backends for a Global load balancer. - - tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load - balancer. The possible values are `DEFAULT` or `STRONG`. The default value is - `DEFAULT`. - - type: A string indicating whether the load balancer should be a standard regional HTTP - load balancer, a regional network load balancer that routes traffic at the - TCP/UDP transport layer, or a global load balancer. - - vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @overload - def update( - self, - lb_id: str, - *, - forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - tag: str | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> LoadBalancerUpdateResponse: - """ - To update a load balancer's settings, send a PUT request to - `/v2/load_balancers/$LOAD_BALANCER_ID`. The request should contain a full - representation of the load balancer including existing attributes. It may - contain _one of_ the `droplets_ids` or `tag` attributes as they are mutually - exclusive. **Note that any attribute that is not provided will be reset to its - default value.** - - Args: - forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. - - algorithm: This field has been deprecated. You can no longer specify an algorithm for load - balancers. - - disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for - Let's Encrypt certificates that are added to the load balancer. - - domains: An array of objects specifying the domain configurations for a Global load - balancer. - - enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to - target Droplets. - - enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. - - firewall: An object specifying allow and deny rules to control traffic to the load - balancer. - - glb_settings: An object specifying forwarding configurations for a Global load balancer. - - health_check: An object specifying health check settings for the load balancer. - - http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the - target droplets. - - name: A human-readable name for a load balancer instance. - - network: A string indicating whether the load balancer should be external or internal. - Internal load balancers have no public IPs and are only accessible to resources - on the same VPC network. 
This property cannot be updated after creating the load - balancer. - - network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and - IPv6 networking. This property cannot be updated after creating the load - balancer. - - project_id: The ID of the project that the load balancer is associated with. If no ID is - provided at creation, the load balancer associates with the user's default - project. If an invalid project ID is provided, the load balancer will not be - created. - - redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 - will be redirected to HTTPS on port 443. - - region: The slug identifier for the region where the resource will initially be - available. - - size: This field has been replaced by the `size_unit` field for all regions except in - AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load - balancer having a set number of nodes. - - - `lb-small` = 1 node - - `lb-medium` = 3 nodes - - `lb-large` = 6 nodes - - You can resize load balancers after creation up to once per hour. You cannot - resize a load balancer within the first hour of its creation. - - size_unit: How many nodes the load balancer contains. Each additional node increases the - load balancer's ability to manage more connections. Load balancers can be scaled - up or down, and you can change the number of nodes after creation up to once per - hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. - Use the `size` field to scale load balancers that reside in these regions. - - sticky_sessions: An object specifying sticky sessions settings for the load balancer. - - tag: The name of a Droplet tag corresponding to Droplets assigned to the load - balancer. - - target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as - target backends for a Global load balancer. 
- - tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load - balancer. The possible values are `DEFAULT` or `STRONG`. The default value is - `DEFAULT`. - - type: A string indicating whether the load balancer should be a standard regional HTTP - load balancer, a regional network load balancer that routes traffic at the - TCP/UDP transport layer, or a global load balancer. - - vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["forwarding_rules"]) - def update( - self, - lb_id: str, - *, - forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", 
"lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - tag: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> LoadBalancerUpdateResponse: - if not lb_id: - raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") - return self._put( - f"/v2/load_balancers/{lb_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}", - body=maybe_transform( - { - "forwarding_rules": forwarding_rules, - "algorithm": algorithm, - "disable_lets_encrypt_dns_records": disable_lets_encrypt_dns_records, - "domains": domains, - "droplet_ids": droplet_ids, - "enable_backend_keepalive": enable_backend_keepalive, - "enable_proxy_protocol": enable_proxy_protocol, - "firewall": firewall, - "glb_settings": glb_settings, - "health_check": health_check, - "http_idle_timeout_seconds": http_idle_timeout_seconds, - "name": name, - "network": network, - "network_stack": network_stack, - "project_id": project_id, - "redirect_http_to_https": redirect_http_to_https, - "region": region, - "size": size, - "size_unit": size_unit, - "sticky_sessions": sticky_sessions, - "target_load_balancer_ids": target_load_balancer_ids, - "tls_cipher_policy": tls_cipher_policy, - "type": type, - "vpc_uuid": vpc_uuid, - "tag": 
tag, - }, - load_balancer_update_params.LoadBalancerUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=LoadBalancerUpdateResponse, - ) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> LoadBalancerListResponse: - """ - To list all of the load balancer instances on your account, send a GET request - to `/v2/load_balancers`. - - Args: - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/load_balancers" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/load_balancers", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - load_balancer_list_params.LoadBalancerListParams, - ), - ), - cast_to=LoadBalancerListResponse, - ) - - def delete( - self, - lb_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete a load balancer instance, disassociating any Droplets assigned to it - and removing it from your account, send a DELETE request to - `/v2/load_balancers/$LOAD_BALANCER_ID`. - - A successful request will receive a 204 status code with no body in response. - This indicates that the request was processed successfully. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not lb_id: - raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/v2/load_balancers/{lb_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - def delete_cache( - self, - lb_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete a Global load balancer CDN cache, send a DELETE request to - `/v2/load_balancers/$LOAD_BALANCER_ID/cache`. - - A successful request will receive a 204 status code with no body in response. - This indicates that the request was processed successfully. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not lb_id: - raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/v2/load_balancers/{lb_id}/cache" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/cache", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class AsyncLoadBalancersResource(AsyncAPIResource): - @cached_property - def droplets(self) -> AsyncDropletsResource: - return AsyncDropletsResource(self._client) - - @cached_property - def forwarding_rules(self) -> AsyncForwardingRulesResource: - return AsyncForwardingRulesResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncLoadBalancersResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncLoadBalancersResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncLoadBalancersResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncLoadBalancersResourceWithStreamingResponse(self) - - @overload - async def create( - self, - *, - forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> LoadBalancerCreateResponse: - """ - To create a new load balancer instance, send a POST request to - `/v2/load_balancers`. - - You can specify the Droplets that will sit behind the load balancer using one of - two methods: - - - Set `droplet_ids` to a list of specific Droplet IDs. - - Set `tag` to the name of a tag. All Droplets with this tag applied will be - assigned to the load balancer. Additional Droplets will be automatically - assigned as they are tagged. - - These methods are mutually exclusive. - - Args: - forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. - - algorithm: This field has been deprecated. You can no longer specify an algorithm for load - balancers. - - disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for - Let's Encrypt certificates that are added to the load balancer. - - domains: An array of objects specifying the domain configurations for a Global load - balancer. - - droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. - - enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to - target Droplets. - - enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. - - firewall: An object specifying allow and deny rules to control traffic to the load - balancer. - - glb_settings: An object specifying forwarding configurations for a Global load balancer. - - health_check: An object specifying health check settings for the load balancer. 
- - http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the - target droplets. - - name: A human-readable name for a load balancer instance. - - network: A string indicating whether the load balancer should be external or internal. - Internal load balancers have no public IPs and are only accessible to resources - on the same VPC network. This property cannot be updated after creating the load - balancer. - - network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and - IPv6 networking. This property cannot be updated after creating the load - balancer. - - project_id: The ID of the project that the load balancer is associated with. If no ID is - provided at creation, the load balancer associates with the user's default - project. If an invalid project ID is provided, the load balancer will not be - created. - - redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 - will be redirected to HTTPS on port 443. - - region: The slug identifier for the region where the resource will initially be - available. - - size: This field has been replaced by the `size_unit` field for all regions except in - AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load - balancer having a set number of nodes. - - - `lb-small` = 1 node - - `lb-medium` = 3 nodes - - `lb-large` = 6 nodes - - You can resize load balancers after creation up to once per hour. You cannot - resize a load balancer within the first hour of its creation. - - size_unit: How many nodes the load balancer contains. Each additional node increases the - load balancer's ability to manage more connections. Load balancers can be scaled - up or down, and you can change the number of nodes after creation up to once per - hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. 
- Use the `size` field to scale load balancers that reside in these regions. - - sticky_sessions: An object specifying sticky sessions settings for the load balancer. - - target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as - target backends for a Global load balancer. - - tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load - balancer. The possible values are `DEFAULT` or `STRONG`. The default value is - `DEFAULT`. - - type: A string indicating whether the load balancer should be a standard regional HTTP - load balancer, a regional network load balancer that routes traffic at the - TCP/UDP transport layer, or a global load balancer. - - vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @overload - async def create( - self, - *, - forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - tag: str | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> LoadBalancerCreateResponse: - """ - To create a new load balancer instance, send a POST request to - `/v2/load_balancers`. - - You can specify the Droplets that will sit behind the load balancer using one of - two methods: - - - Set `droplet_ids` to a list of specific Droplet IDs. - - Set `tag` to the name of a tag. All Droplets with this tag applied will be - assigned to the load balancer. Additional Droplets will be automatically - assigned as they are tagged. - - These methods are mutually exclusive. - - Args: - forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. - - algorithm: This field has been deprecated. You can no longer specify an algorithm for load - balancers. - - disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for - Let's Encrypt certificates that are added to the load balancer. - - domains: An array of objects specifying the domain configurations for a Global load - balancer. - - enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to - target Droplets. - - enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. - - firewall: An object specifying allow and deny rules to control traffic to the load - balancer. - - glb_settings: An object specifying forwarding configurations for a Global load balancer. - - health_check: An object specifying health check settings for the load balancer. - - http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the - target droplets. - - name: A human-readable name for a load balancer instance. - - network: A string indicating whether the load balancer should be external or internal. 
- Internal load balancers have no public IPs and are only accessible to resources - on the same VPC network. This property cannot be updated after creating the load - balancer. - - network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and - IPv6 networking. This property cannot be updated after creating the load - balancer. - - project_id: The ID of the project that the load balancer is associated with. If no ID is - provided at creation, the load balancer associates with the user's default - project. If an invalid project ID is provided, the load balancer will not be - created. - - redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 - will be redirected to HTTPS on port 443. - - region: The slug identifier for the region where the resource will initially be - available. - - size: This field has been replaced by the `size_unit` field for all regions except in - AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load - balancer having a set number of nodes. - - - `lb-small` = 1 node - - `lb-medium` = 3 nodes - - `lb-large` = 6 nodes - - You can resize load balancers after creation up to once per hour. You cannot - resize a load balancer within the first hour of its creation. - - size_unit: How many nodes the load balancer contains. Each additional node increases the - load balancer's ability to manage more connections. Load balancers can be scaled - up or down, and you can change the number of nodes after creation up to once per - hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. - Use the `size` field to scale load balancers that reside in these regions. - - sticky_sessions: An object specifying sticky sessions settings for the load balancer. - - tag: The name of a Droplet tag corresponding to Droplets assigned to the load - balancer. 
- - target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as - target backends for a Global load balancer. - - tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load - balancer. The possible values are `DEFAULT` or `STRONG`. The default value is - `DEFAULT`. - - type: A string indicating whether the load balancer should be a standard regional HTTP - load balancer, a regional network load balancer that routes traffic at the - TCP/UDP transport layer, or a global load balancer. - - vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["forwarding_rules"]) - async def create( - self, - *, - forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - 
"nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - tag: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> LoadBalancerCreateResponse: - return await self._post( - "/v2/load_balancers" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/load_balancers", - body=await async_maybe_transform( - { - "forwarding_rules": forwarding_rules, - "algorithm": algorithm, - "disable_lets_encrypt_dns_records": disable_lets_encrypt_dns_records, - "domains": domains, - "droplet_ids": droplet_ids, - "enable_backend_keepalive": enable_backend_keepalive, - "enable_proxy_protocol": enable_proxy_protocol, - "firewall": firewall, - "glb_settings": glb_settings, - "health_check": health_check, - "http_idle_timeout_seconds": http_idle_timeout_seconds, - "name": name, - "network": network, - "network_stack": network_stack, - "project_id": project_id, - "redirect_http_to_https": redirect_http_to_https, - "region": region, - "size": size, - "size_unit": size_unit, - "sticky_sessions": sticky_sessions, - "target_load_balancer_ids": target_load_balancer_ids, - "tls_cipher_policy": tls_cipher_policy, - 
"type": type, - "vpc_uuid": vpc_uuid, - "tag": tag, - }, - load_balancer_create_params.LoadBalancerCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=LoadBalancerCreateResponse, - ) - - async def retrieve( - self, - lb_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> LoadBalancerRetrieveResponse: - """ - To show information about a load balancer instance, send a GET request to - `/v2/load_balancers/$LOAD_BALANCER_ID`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not lb_id: - raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") - return await self._get( - f"/v2/load_balancers/{lb_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=LoadBalancerRetrieveResponse, - ) - - @overload - async def update( - self, - lb_id: str, - *, - forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, - 
enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> LoadBalancerUpdateResponse: - """ - To update a load balancer's settings, send a PUT request to - `/v2/load_balancers/$LOAD_BALANCER_ID`. The request should contain a full - representation of the load balancer including existing attributes. It may - contain _one of_ the `droplets_ids` or `tag` attributes as they are mutually - exclusive. 
**Note that any attribute that is not provided will be reset to its - default value.** - - Args: - forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. - - algorithm: This field has been deprecated. You can no longer specify an algorithm for load - balancers. - - disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for - Let's Encrypt certificates that are added to the load balancer. - - domains: An array of objects specifying the domain configurations for a Global load - balancer. - - droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. - - enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to - target Droplets. - - enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. - - firewall: An object specifying allow and deny rules to control traffic to the load - balancer. - - glb_settings: An object specifying forwarding configurations for a Global load balancer. - - health_check: An object specifying health check settings for the load balancer. - - http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the - target droplets. - - name: A human-readable name for a load balancer instance. - - network: A string indicating whether the load balancer should be external or internal. - Internal load balancers have no public IPs and are only accessible to resources - on the same VPC network. This property cannot be updated after creating the load - balancer. - - network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and - IPv6 networking. This property cannot be updated after creating the load - balancer. - - project_id: The ID of the project that the load balancer is associated with. If no ID is - provided at creation, the load balancer associates with the user's default - project. 
If an invalid project ID is provided, the load balancer will not be - created. - - redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 - will be redirected to HTTPS on port 443. - - region: The slug identifier for the region where the resource will initially be - available. - - size: This field has been replaced by the `size_unit` field for all regions except in - AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load - balancer having a set number of nodes. - - - `lb-small` = 1 node - - `lb-medium` = 3 nodes - - `lb-large` = 6 nodes - - You can resize load balancers after creation up to once per hour. You cannot - resize a load balancer within the first hour of its creation. - - size_unit: How many nodes the load balancer contains. Each additional node increases the - load balancer's ability to manage more connections. Load balancers can be scaled - up or down, and you can change the number of nodes after creation up to once per - hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. - Use the `size` field to scale load balancers that reside in these regions. - - sticky_sessions: An object specifying sticky sessions settings for the load balancer. - - target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as - target backends for a Global load balancer. - - tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load - balancer. The possible values are `DEFAULT` or `STRONG`. The default value is - `DEFAULT`. - - type: A string indicating whether the load balancer should be a standard regional HTTP - load balancer, a regional network load balancer that routes traffic at the - TCP/UDP transport layer, or a global load balancer. - - vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - async def update( - self, - lb_id: str, - *, - forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - tag: str | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> LoadBalancerUpdateResponse: - """ - To update a load balancer's settings, send a PUT request to - `/v2/load_balancers/$LOAD_BALANCER_ID`. The request should contain a full - representation of the load balancer including existing attributes. It may - contain _one of_ the `droplets_ids` or `tag` attributes as they are mutually - exclusive. **Note that any attribute that is not provided will be reset to its - default value.** - - Args: - forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. - - algorithm: This field has been deprecated. You can no longer specify an algorithm for load - balancers. - - disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for - Let's Encrypt certificates that are added to the load balancer. - - domains: An array of objects specifying the domain configurations for a Global load - balancer. - - enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to - target Droplets. - - enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. - - firewall: An object specifying allow and deny rules to control traffic to the load - balancer. - - glb_settings: An object specifying forwarding configurations for a Global load balancer. - - health_check: An object specifying health check settings for the load balancer. - - http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the - target droplets. - - name: A human-readable name for a load balancer instance. - - network: A string indicating whether the load balancer should be external or internal. 
- Internal load balancers have no public IPs and are only accessible to resources - on the same VPC network. This property cannot be updated after creating the load - balancer. - - network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and - IPv6 networking. This property cannot be updated after creating the load - balancer. - - project_id: The ID of the project that the load balancer is associated with. If no ID is - provided at creation, the load balancer associates with the user's default - project. If an invalid project ID is provided, the load balancer will not be - created. - - redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 - will be redirected to HTTPS on port 443. - - region: The slug identifier for the region where the resource will initially be - available. - - size: This field has been replaced by the `size_unit` field for all regions except in - AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load - balancer having a set number of nodes. - - - `lb-small` = 1 node - - `lb-medium` = 3 nodes - - `lb-large` = 6 nodes - - You can resize load balancers after creation up to once per hour. You cannot - resize a load balancer within the first hour of its creation. - - size_unit: How many nodes the load balancer contains. Each additional node increases the - load balancer's ability to manage more connections. Load balancers can be scaled - up or down, and you can change the number of nodes after creation up to once per - hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. - Use the `size` field to scale load balancers that reside in these regions. - - sticky_sessions: An object specifying sticky sessions settings for the load balancer. - - tag: The name of a Droplet tag corresponding to Droplets assigned to the load - balancer. 
- - target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as - target backends for a Global load balancer. - - tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load - balancer. The possible values are `DEFAULT` or `STRONG`. The default value is - `DEFAULT`. - - type: A string indicating whether the load balancer should be a standard regional HTTP - load balancer, a regional network load balancer that routes traffic at the - TCP/UDP transport layer, or a global load balancer. - - vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["forwarding_rules"]) - async def update( - self, - lb_id: str, - *, - forwarding_rules: Iterable[ForwardingRuleParam], - algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, - disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, - domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, - droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, - enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, - enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, - firewall: LbFirewallParam | NotGiven = NOT_GIVEN, - glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, - health_check: HealthCheckParam | NotGiven = NOT_GIVEN, - http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, - name: str | NotGiven = NOT_GIVEN, - network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, - network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, - project_id: str | NotGiven = NOT_GIVEN, - redirect_http_to_https: bool | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - 
"nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, - size_unit: int | NotGiven = NOT_GIVEN, - sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, - target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, - tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, - vpc_uuid: str | NotGiven = NOT_GIVEN, - tag: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> LoadBalancerUpdateResponse: - if not lb_id: - raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") - return await self._put( - f"/v2/load_balancers/{lb_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}", - body=await async_maybe_transform( - { - "forwarding_rules": forwarding_rules, - "algorithm": algorithm, - "disable_lets_encrypt_dns_records": disable_lets_encrypt_dns_records, - "domains": domains, - "droplet_ids": droplet_ids, - "enable_backend_keepalive": enable_backend_keepalive, - "enable_proxy_protocol": enable_proxy_protocol, - "firewall": firewall, - "glb_settings": glb_settings, - "health_check": health_check, - "http_idle_timeout_seconds": http_idle_timeout_seconds, - "name": name, - "network": network, - "network_stack": network_stack, - "project_id": project_id, - "redirect_http_to_https": redirect_http_to_https, - "region": region, - "size": size, - "size_unit": size_unit, - 
"sticky_sessions": sticky_sessions, - "target_load_balancer_ids": target_load_balancer_ids, - "tls_cipher_policy": tls_cipher_policy, - "type": type, - "vpc_uuid": vpc_uuid, - "tag": tag, - }, - load_balancer_update_params.LoadBalancerUpdateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=LoadBalancerUpdateResponse, - ) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> LoadBalancerListResponse: - """ - To list all of the load balancer instances on your account, send a GET request - to `/v2/load_balancers`. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/load_balancers" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/load_balancers", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - load_balancer_list_params.LoadBalancerListParams, - ), - ), - cast_to=LoadBalancerListResponse, - ) - - async def delete( - self, - lb_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete a load balancer instance, disassociating any Droplets assigned to it - and removing it from your account, send a DELETE request to - `/v2/load_balancers/$LOAD_BALANCER_ID`. - - A successful request will receive a 204 status code with no body in response. - This indicates that the request was processed successfully. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not lb_id: - raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/v2/load_balancers/{lb_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - async def delete_cache( - self, - lb_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete a Global load balancer CDN cache, send a DELETE request to - `/v2/load_balancers/$LOAD_BALANCER_ID/cache`. - - A successful request will receive a 204 status code with no body in response. - This indicates that the request was processed successfully. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not lb_id: - raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/v2/load_balancers/{lb_id}/cache" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/cache", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class LoadBalancersResourceWithRawResponse: - def __init__(self, load_balancers: LoadBalancersResource) -> None: - self._load_balancers = load_balancers - - self.create = to_raw_response_wrapper( - load_balancers.create, - ) - self.retrieve = to_raw_response_wrapper( - load_balancers.retrieve, - ) - self.update = to_raw_response_wrapper( - load_balancers.update, - ) - self.list = to_raw_response_wrapper( - load_balancers.list, - ) - self.delete = to_raw_response_wrapper( - load_balancers.delete, - ) - self.delete_cache = to_raw_response_wrapper( - load_balancers.delete_cache, - ) - - @cached_property - def droplets(self) -> DropletsResourceWithRawResponse: - return DropletsResourceWithRawResponse(self._load_balancers.droplets) - - @cached_property - def forwarding_rules(self) -> ForwardingRulesResourceWithRawResponse: - return ForwardingRulesResourceWithRawResponse(self._load_balancers.forwarding_rules) - - -class AsyncLoadBalancersResourceWithRawResponse: - def __init__(self, load_balancers: AsyncLoadBalancersResource) -> None: - self._load_balancers = load_balancers - - self.create = async_to_raw_response_wrapper( - load_balancers.create, - ) - self.retrieve = async_to_raw_response_wrapper( - 
load_balancers.retrieve, - ) - self.update = async_to_raw_response_wrapper( - load_balancers.update, - ) - self.list = async_to_raw_response_wrapper( - load_balancers.list, - ) - self.delete = async_to_raw_response_wrapper( - load_balancers.delete, - ) - self.delete_cache = async_to_raw_response_wrapper( - load_balancers.delete_cache, - ) - - @cached_property - def droplets(self) -> AsyncDropletsResourceWithRawResponse: - return AsyncDropletsResourceWithRawResponse(self._load_balancers.droplets) - - @cached_property - def forwarding_rules(self) -> AsyncForwardingRulesResourceWithRawResponse: - return AsyncForwardingRulesResourceWithRawResponse(self._load_balancers.forwarding_rules) - - -class LoadBalancersResourceWithStreamingResponse: - def __init__(self, load_balancers: LoadBalancersResource) -> None: - self._load_balancers = load_balancers - - self.create = to_streamed_response_wrapper( - load_balancers.create, - ) - self.retrieve = to_streamed_response_wrapper( - load_balancers.retrieve, - ) - self.update = to_streamed_response_wrapper( - load_balancers.update, - ) - self.list = to_streamed_response_wrapper( - load_balancers.list, - ) - self.delete = to_streamed_response_wrapper( - load_balancers.delete, - ) - self.delete_cache = to_streamed_response_wrapper( - load_balancers.delete_cache, - ) - - @cached_property - def droplets(self) -> DropletsResourceWithStreamingResponse: - return DropletsResourceWithStreamingResponse(self._load_balancers.droplets) - - @cached_property - def forwarding_rules(self) -> ForwardingRulesResourceWithStreamingResponse: - return ForwardingRulesResourceWithStreamingResponse(self._load_balancers.forwarding_rules) - - -class AsyncLoadBalancersResourceWithStreamingResponse: - def __init__(self, load_balancers: AsyncLoadBalancersResource) -> None: - self._load_balancers = load_balancers - - self.create = async_to_streamed_response_wrapper( - load_balancers.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - 
load_balancers.retrieve, - ) - self.update = async_to_streamed_response_wrapper( - load_balancers.update, - ) - self.list = async_to_streamed_response_wrapper( - load_balancers.list, - ) - self.delete = async_to_streamed_response_wrapper( - load_balancers.delete, - ) - self.delete_cache = async_to_streamed_response_wrapper( - load_balancers.delete_cache, - ) - - @cached_property - def droplets(self) -> AsyncDropletsResourceWithStreamingResponse: - return AsyncDropletsResourceWithStreamingResponse(self._load_balancers.droplets) - - @cached_property - def forwarding_rules(self) -> AsyncForwardingRulesResourceWithStreamingResponse: - return AsyncForwardingRulesResourceWithStreamingResponse(self._load_balancers.forwarding_rules) diff --git a/src/gradientai/resources/models/models.py b/src/gradientai/resources/models/models.py index 41f2eabd..3c524767 100644 --- a/src/gradientai/resources/models/models.py +++ b/src/gradientai/resources/models/models.py @@ -2,9 +2,14 @@ from __future__ import annotations +from typing import List +from typing_extensions import Literal + import httpx +from ...types import model_list_params from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import ( @@ -23,7 +28,6 @@ AsyncProvidersResourceWithStreamingResponse, ) from ...types.model_list_response import ModelListResponse -from ...types.model_retrieve_response import ModelRetrieveResponse __all__ = ["ModelsResource", "AsyncModelsResource"] @@ -52,22 +56,52 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ return ModelsResourceWithStreamingResponse(self) - def retrieve( + def list( self, - model: str, *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + public_only: bool | NotGiven = NOT_GIVEN, + usecases: List[ + Literal[ + 
"MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelRetrieveResponse: + ) -> ModelListResponse: """ - Retrieves a model instance, providing basic information about the model such as - the owner and permissioning. + To list all models, send a GET request to `/v2/gen-ai/models`. Args: + page: page number. + + per_page: items per page. + + public_only: only include models that are publicly available. + + usecases: include only models defined for the listed usecases. 
+ + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model maybe used in an agent + - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -76,36 +110,24 @@ def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ - if not model: - raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return self._get( - f"/models/{model}" + "/v2/gen-ai/models" if self._client._base_url_overridden - else f"https://inference.do-ai.run/v1/models/{model}", + else "https://api.digitalocean.com/v2/gen-ai/models", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ModelRetrieveResponse, - ) - - def list( - self, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: - """ - Lists the currently available models, and provides basic information about each - one such as the owner and availability. 
- """ - return self._get( - "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + "public_only": public_only, + "usecases": usecases, + }, + model_list_params.ModelListParams, + ), ), cast_to=ModelListResponse, ) @@ -135,22 +157,52 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ return AsyncModelsResourceWithStreamingResponse(self) - async def retrieve( + async def list( self, - model: str, *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + public_only: bool | NotGiven = NOT_GIVEN, + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelRetrieveResponse: + ) -> ModelListResponse: """ - Retrieves a model instance, providing basic information about the model such as - the owner and permissioning. + To list all models, send a GET request to `/v2/gen-ai/models`. Args: + page: page number. + + per_page: items per page. + + public_only: only include models that are publicly available. + + usecases: include only models defined for the listed usecases. 
+ + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model maybe used in an agent + - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -159,36 +211,24 @@ async def retrieve( timeout: Override the client-level default timeout for this request, in seconds """ - if not model: - raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return await self._get( - f"/models/{model}" + "/v2/gen-ai/models" if self._client._base_url_overridden - else f"https://inference.do-ai.run/v1/models/{model}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ModelRetrieveResponse, - ) - - async def list( - self, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: - """ - Lists the currently available models, and provides basic information about each - one such as the owner and availability. 
- """ - return await self._get( - "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", + else "https://api.digitalocean.com/v2/gen-ai/models", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + "public_only": public_only, + "usecases": usecases, + }, + model_list_params.ModelListParams, + ), ), cast_to=ModelListResponse, ) @@ -198,9 +238,6 @@ class ModelsResourceWithRawResponse: def __init__(self, models: ModelsResource) -> None: self._models = models - self.retrieve = to_raw_response_wrapper( - models.retrieve, - ) self.list = to_raw_response_wrapper( models.list, ) @@ -214,9 +251,6 @@ class AsyncModelsResourceWithRawResponse: def __init__(self, models: AsyncModelsResource) -> None: self._models = models - self.retrieve = async_to_raw_response_wrapper( - models.retrieve, - ) self.list = async_to_raw_response_wrapper( models.list, ) @@ -230,9 +264,6 @@ class ModelsResourceWithStreamingResponse: def __init__(self, models: ModelsResource) -> None: self._models = models - self.retrieve = to_streamed_response_wrapper( - models.retrieve, - ) self.list = to_streamed_response_wrapper( models.list, ) @@ -246,9 +277,6 @@ class AsyncModelsResourceWithStreamingResponse: def __init__(self, models: AsyncModelsResource) -> None: self._models = models - self.retrieve = async_to_streamed_response_wrapper( - models.retrieve, - ) self.list = async_to_streamed_response_wrapper( models.list, ) diff --git a/src/gradientai/resources/models/providers/anthropic.py b/src/gradientai/resources/models/providers/anthropic.py index e570be51..26c9b977 100644 --- a/src/gradientai/resources/models/providers/anthropic.py +++ b/src/gradientai/resources/models/providers/anthropic.py @@ -68,10 +68,6 @@ def 
create( `/v2/gen-ai/anthropic/keys`. Args: - api_key: Anthropic API key - - name: Name of the key - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -152,12 +148,6 @@ def update( `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. Args: - api_key: Anthropic API key - - body_api_key_uuid: API key ID - - name: Name of the key - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -203,9 +193,9 @@ def list( `/v2/gen-ai/anthropic/keys`. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers @@ -288,9 +278,9 @@ def list_agents( List Agents by Anthropic Key. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers @@ -360,10 +350,6 @@ async def create( `/v2/gen-ai/anthropic/keys`. Args: - api_key: Anthropic API key - - name: Name of the key - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -444,12 +430,6 @@ async def update( `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. Args: - api_key: Anthropic API key - - body_api_key_uuid: API key ID - - name: Name of the key - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -495,9 +475,9 @@ async def list( `/v2/gen-ai/anthropic/keys`. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers @@ -580,9 +560,9 @@ async def list_agents( List Agents by Anthropic Key. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. 
extra_headers: Send extra headers diff --git a/src/gradientai/resources/models/providers/openai.py b/src/gradientai/resources/models/providers/openai.py index ccd594b8..d337cd9b 100644 --- a/src/gradientai/resources/models/providers/openai.py +++ b/src/gradientai/resources/models/providers/openai.py @@ -67,10 +67,6 @@ def create( To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. Args: - api_key: OpenAI API key - - name: Name of the key - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -151,12 +147,6 @@ def update( `/v2/gen-ai/openai/keys/{api_key_uuid}`. Args: - api_key: OpenAI API key - - body_api_key_uuid: API key ID - - name: Name of the key - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -201,9 +191,9 @@ def list( To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers @@ -286,9 +276,9 @@ def retrieve_agents( List Agents by OpenAI Key. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers @@ -357,10 +347,6 @@ async def create( To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. Args: - api_key: OpenAI API key - - name: Name of the key - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -441,12 +427,6 @@ async def update( `/v2/gen-ai/openai/keys/{api_key_uuid}`. Args: - api_key: OpenAI API key - - body_api_key_uuid: API key ID - - name: Name of the key - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -491,9 +471,9 @@ async def list( To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. Args: - page: Page number. + page: page number. 
- per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers @@ -576,9 +556,9 @@ async def retrieve_agents( List Agents by OpenAI Key. Args: - page: Page number. + page: page number. - per_page: Items per page. + per_page: items per page. extra_headers: Send extra headers diff --git a/src/gradientai/resources/regions.py b/src/gradientai/resources/regions.py index e953e4f3..4c50d9e6 100644 --- a/src/gradientai/resources/regions.py +++ b/src/gradientai/resources/regions.py @@ -44,8 +44,8 @@ def with_streaming_response(self) -> RegionsResourceWithStreamingResponse: def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + serves_batch: bool | NotGiven = NOT_GIVEN, + serves_inference: bool | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -54,15 +54,12 @@ def list( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> RegionListResponse: """ - To list all of the regions that are available, send a GET request to - `/v2/regions`. The response will be a JSON object with a key called `regions`. - The value of this will be an array of `region` objects, each of which will - contain the standard region attributes. + To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. Args: - page: Which 'page' of paginated results to return. + serves_batch: include datacenters that are capable of running batch jobs. - per_page: Number of items returned per page + serves_inference: include datacenters that serve inference. 
extra_headers: Send extra headers @@ -73,7 +70,9 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ return self._get( - "/v2/regions" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/regions", + "/v2/gen-ai/regions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/regions", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -81,8 +80,8 @@ def list( timeout=timeout, query=maybe_transform( { - "page": page, - "per_page": per_page, + "serves_batch": serves_batch, + "serves_inference": serves_inference, }, region_list_params.RegionListParams, ), @@ -114,8 +113,8 @@ def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse: async def list( self, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, + serves_batch: bool | NotGiven = NOT_GIVEN, + serves_inference: bool | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -124,15 +123,12 @@ async def list( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> RegionListResponse: """ - To list all of the regions that are available, send a GET request to - `/v2/regions`. The response will be a JSON object with a key called `regions`. - The value of this will be an array of `region` objects, each of which will - contain the standard region attributes. + To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. Args: - page: Which 'page' of paginated results to return. + serves_batch: include datacenters that are capable of running batch jobs. - per_page: Number of items returned per page + serves_inference: include datacenters that serve inference. 
extra_headers: Send extra headers @@ -143,7 +139,9 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ return await self._get( - "/v2/regions" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/regions", + "/v2/gen-ai/regions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/regions", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -151,8 +149,8 @@ async def list( timeout=timeout, query=await async_maybe_transform( { - "page": page, - "per_page": per_page, + "serves_batch": serves_batch, + "serves_inference": serves_inference, }, region_list_params.RegionListParams, ), diff --git a/src/gradientai/resources/sizes.py b/src/gradientai/resources/sizes.py deleted file mode 100644 index a432920e..00000000 --- a/src/gradientai/resources/sizes.py +++ /dev/null @@ -1,199 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import httpx - -from ..types import size_list_params -from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.size_list_response import SizeListResponse - -__all__ = ["SizesResource", "AsyncSizesResource"] - - -class SizesResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> SizesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return SizesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> SizesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return SizesResourceWithStreamingResponse(self) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SizeListResponse: - """To list all of available Droplet sizes, send a GET request to `/v2/sizes`. - - The - response will be a JSON object with a key called `sizes`. The value of this will - be an array of `size` objects each of which contain the standard size - attributes. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/sizes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/sizes", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - size_list_params.SizeListParams, - ), - ), - cast_to=SizeListResponse, - ) - - -class AsyncSizesResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncSizesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncSizesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncSizesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncSizesResourceWithStreamingResponse(self) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SizeListResponse: - """To list all of available Droplet sizes, send a GET request to `/v2/sizes`. - - The - response will be a JSON object with a key called `sizes`. The value of this will - be an array of `size` objects each of which contain the standard size - attributes. - - Args: - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/sizes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/sizes", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - size_list_params.SizeListParams, - ), - ), - cast_to=SizeListResponse, - ) - - -class SizesResourceWithRawResponse: - def __init__(self, sizes: SizesResource) -> None: - self._sizes = sizes - - self.list = to_raw_response_wrapper( - sizes.list, - ) - - -class AsyncSizesResourceWithRawResponse: - def __init__(self, sizes: AsyncSizesResource) -> None: - self._sizes = sizes - - self.list = async_to_raw_response_wrapper( - sizes.list, - ) - - -class SizesResourceWithStreamingResponse: - def __init__(self, sizes: SizesResource) -> None: - self._sizes = sizes - - self.list = to_streamed_response_wrapper( - sizes.list, - ) - - -class AsyncSizesResourceWithStreamingResponse: - def __init__(self, sizes: AsyncSizesResource) -> None: - self._sizes = sizes - - self.list = async_to_streamed_response_wrapper( - sizes.list, - ) diff 
--git a/src/gradientai/resources/snapshots.py b/src/gradientai/resources/snapshots.py deleted file mode 100644 index 2c4d0060..00000000 --- a/src/gradientai/resources/snapshots.py +++ /dev/null @@ -1,425 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal - -import httpx - -from ..types import snapshot_list_params -from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from .._utils import maybe_transform, async_maybe_transform -from .._compat import cached_property -from .._resource import SyncAPIResource, AsyncAPIResource -from .._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from .._base_client import make_request_options -from ..types.snapshot_list_response import SnapshotListResponse -from ..types.snapshot_retrieve_response import SnapshotRetrieveResponse - -__all__ = ["SnapshotsResource", "AsyncSnapshotsResource"] - - -class SnapshotsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> SnapshotsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return SnapshotsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> SnapshotsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return SnapshotsResourceWithStreamingResponse(self) - - def retrieve( - self, - snapshot_id: Union[int, str], - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SnapshotRetrieveResponse: - """ - To retrieve information about a snapshot, send a GET request to - `/v2/snapshots/$SNAPSHOT_ID`. - - The response will be a JSON object with a key called `snapshot`. The value of - this will be an snapshot object containing the standard snapshot attributes. - - Args: - snapshot_id: The ID of a Droplet snapshot. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - f"/v2/snapshots/{snapshot_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/snapshots/{snapshot_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=SnapshotRetrieveResponse, - ) - - def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - resource_type: Literal["droplet", "volume"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SnapshotListResponse: - """ - To list all of the snapshots available on your account, send a GET request to - `/v2/snapshots`. - - The response will be a JSON object with a key called `snapshots`. This will be - set to an array of `snapshot` objects, each of which will contain the standard - snapshot attributes. - - ### Filtering Results by Resource Type - - It's possible to request filtered results by including certain query parameters. - - #### List Droplet Snapshots - - To retrieve only snapshots based on Droplets, include the `resource_type` query - parameter set to `droplet`. For example, `/v2/snapshots?resource_type=droplet`. - - #### List Volume Snapshots - - To retrieve only snapshots based on volumes, include the `resource_type` query - parameter set to `volume`. For example, `/v2/snapshots?resource_type=volume`. - - Args: - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - resource_type: Used to filter snapshots by a resource type. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/snapshots" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/snapshots", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - "resource_type": resource_type, - }, - snapshot_list_params.SnapshotListParams, - ), - ), - cast_to=SnapshotListResponse, - ) - - def delete( - self, - snapshot_id: Union[int, str], - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - Both Droplet and volume snapshots are managed through the `/v2/snapshots/` - endpoint. To delete a snapshot, send a DELETE request to - `/v2/snapshots/$SNAPSHOT_ID`. - - A status of 204 will be given. This indicates that the request was processed - successfully, but that no response body is needed. - - Args: - snapshot_id: The ID of a Droplet snapshot. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/v2/snapshots/{snapshot_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/snapshots/{snapshot_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class AsyncSnapshotsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncSnapshotsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncSnapshotsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncSnapshotsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncSnapshotsResourceWithStreamingResponse(self) - - async def retrieve( - self, - snapshot_id: Union[int, str], - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SnapshotRetrieveResponse: - """ - To retrieve information about a snapshot, send a GET request to - `/v2/snapshots/$SNAPSHOT_ID`. - - The response will be a JSON object with a key called `snapshot`. The value of - this will be an snapshot object containing the standard snapshot attributes. - - Args: - snapshot_id: The ID of a Droplet snapshot. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - f"/v2/snapshots/{snapshot_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/snapshots/{snapshot_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=SnapshotRetrieveResponse, - ) - - async def list( - self, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - resource_type: Literal["droplet", "volume"] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SnapshotListResponse: - """ - To list all of the snapshots available on your account, send a GET request to - `/v2/snapshots`. - - The response will be a JSON object with a key called `snapshots`. 
This will be - set to an array of `snapshot` objects, each of which will contain the standard - snapshot attributes. - - ### Filtering Results by Resource Type - - It's possible to request filtered results by including certain query parameters. - - #### List Droplet Snapshots - - To retrieve only snapshots based on Droplets, include the `resource_type` query - parameter set to `droplet`. For example, `/v2/snapshots?resource_type=droplet`. - - #### List Volume Snapshots - - To retrieve only snapshots based on volumes, include the `resource_type` query - parameter set to `volume`. For example, `/v2/snapshots?resource_type=volume`. - - Args: - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - resource_type: Used to filter snapshots by a resource type. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/snapshots" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/snapshots", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - "resource_type": resource_type, - }, - snapshot_list_params.SnapshotListParams, - ), - ), - cast_to=SnapshotListResponse, - ) - - async def delete( - self, - snapshot_id: Union[int, str], - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - Both Droplet and volume snapshots are managed through the `/v2/snapshots/` - endpoint. To delete a snapshot, send a DELETE request to - `/v2/snapshots/$SNAPSHOT_ID`. - - A status of 204 will be given. This indicates that the request was processed - successfully, but that no response body is needed. - - Args: - snapshot_id: The ID of a Droplet snapshot. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/v2/snapshots/{snapshot_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/snapshots/{snapshot_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class SnapshotsResourceWithRawResponse: - def __init__(self, snapshots: SnapshotsResource) -> None: - self._snapshots = snapshots - - self.retrieve = to_raw_response_wrapper( - snapshots.retrieve, - ) - self.list = to_raw_response_wrapper( - snapshots.list, - ) - self.delete = to_raw_response_wrapper( - snapshots.delete, - ) - - -class AsyncSnapshotsResourceWithRawResponse: - def __init__(self, snapshots: AsyncSnapshotsResource) -> None: - self._snapshots = snapshots - - self.retrieve = async_to_raw_response_wrapper( - snapshots.retrieve, - ) - self.list = async_to_raw_response_wrapper( - snapshots.list, - ) - self.delete = async_to_raw_response_wrapper( - snapshots.delete, - ) - - -class SnapshotsResourceWithStreamingResponse: - def __init__(self, snapshots: SnapshotsResource) -> None: - 
self._snapshots = snapshots - - self.retrieve = to_streamed_response_wrapper( - snapshots.retrieve, - ) - self.list = to_streamed_response_wrapper( - snapshots.list, - ) - self.delete = to_streamed_response_wrapper( - snapshots.delete, - ) - - -class AsyncSnapshotsResourceWithStreamingResponse: - def __init__(self, snapshots: AsyncSnapshotsResource) -> None: - self._snapshots = snapshots - - self.retrieve = async_to_streamed_response_wrapper( - snapshots.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - snapshots.list, - ) - self.delete = async_to_streamed_response_wrapper( - snapshots.delete, - ) diff --git a/src/gradientai/resources/volumes/__init__.py b/src/gradientai/resources/volumes/__init__.py deleted file mode 100644 index 167db0b3..00000000 --- a/src/gradientai/resources/volumes/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .actions import ( - ActionsResource, - AsyncActionsResource, - ActionsResourceWithRawResponse, - AsyncActionsResourceWithRawResponse, - ActionsResourceWithStreamingResponse, - AsyncActionsResourceWithStreamingResponse, -) -from .volumes import ( - VolumesResource, - AsyncVolumesResource, - VolumesResourceWithRawResponse, - AsyncVolumesResourceWithRawResponse, - VolumesResourceWithStreamingResponse, - AsyncVolumesResourceWithStreamingResponse, -) -from .snapshots import ( - SnapshotsResource, - AsyncSnapshotsResource, - SnapshotsResourceWithRawResponse, - AsyncSnapshotsResourceWithRawResponse, - SnapshotsResourceWithStreamingResponse, - AsyncSnapshotsResourceWithStreamingResponse, -) - -__all__ = [ - "ActionsResource", - "AsyncActionsResource", - "ActionsResourceWithRawResponse", - "AsyncActionsResourceWithRawResponse", - "ActionsResourceWithStreamingResponse", - "AsyncActionsResourceWithStreamingResponse", - "SnapshotsResource", - "AsyncSnapshotsResource", - "SnapshotsResourceWithRawResponse", - 
"AsyncSnapshotsResourceWithRawResponse", - "SnapshotsResourceWithStreamingResponse", - "AsyncSnapshotsResourceWithStreamingResponse", - "VolumesResource", - "AsyncVolumesResource", - "VolumesResourceWithRawResponse", - "AsyncVolumesResourceWithRawResponse", - "VolumesResourceWithStreamingResponse", - "AsyncVolumesResourceWithStreamingResponse", -] diff --git a/src/gradientai/resources/volumes/actions.py b/src/gradientai/resources/volumes/actions.py deleted file mode 100644 index 08b56e53..00000000 --- a/src/gradientai/resources/volumes/actions.py +++ /dev/null @@ -1,1554 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Optional -from typing_extensions import Literal, overload - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import required_args, maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.volumes import ( - action_list_params, - action_retrieve_params, - action_initiate_by_id_params, - action_initiate_by_name_params, -) -from ...types.volumes.action_list_response import ActionListResponse -from ...types.volumes.action_retrieve_response import ActionRetrieveResponse -from ...types.volumes.action_initiate_by_id_response import ActionInitiateByIDResponse -from ...types.volumes.action_initiate_by_name_response import ActionInitiateByNameResponse - -__all__ = ["ActionsResource", "AsyncActionsResource"] - - -class ActionsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> ActionsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method 
call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return ActionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> ActionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return ActionsResourceWithStreamingResponse(self) - - def retrieve( - self, - action_id: int, - *, - volume_id: str, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionRetrieveResponse: - """ - To retrieve the status of a volume action, send a GET request to - `/v2/volumes/$VOLUME_ID/actions/$ACTION_ID`. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not volume_id: - raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") - return self._get( - f"/v2/volumes/{volume_id}/actions/{action_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions/{action_id}", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - action_retrieve_params.ActionRetrieveParams, - ), - ), - cast_to=ActionRetrieveResponse, - ) - - def list( - self, - volume_id: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionListResponse: - """ - To retrieve all actions that have been executed on a volume, send a GET request - to `/v2/volumes/$VOLUME_ID/actions`. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not volume_id: - raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") - return self._get( - f"/v2/volumes/{volume_id}/actions" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - action_list_params.ActionListParams, - ), - ), - cast_to=ActionListResponse, - ) - - @overload - def initiate_by_id( - self, - volume_id: str, - *, - droplet_id: int, - type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateByIDResponse: - """ - To initiate an action on a block storage volume by Id, send a POST request to - `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate - attributes for the respective action. 
- - ## Attach a Block Storage Volume to a Droplet - - | Attribute | Details | - | ---------- | ------------------------------------------------------------------- | - | type | This must be `attach` | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - Each volume may only be attached to a single Droplet. However, up to fifteen - volumes may be attached to a Droplet at a time. Pre-formatted volumes will be - automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS - Droplets created on or after April 26, 2018 when attached. On older Droplets, - [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) - is required. - - ## Remove a Block Storage Volume from a Droplet - - | Attribute | Details | - | ---------- | ------------------------------------------------------------------- | - | type | This must be `detach` | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - ## Resize a Volume - - | Attribute | Details | - | -------------- | ------------------------------------------------------------------- | - | type | This must be `resize` | - | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | - | region | Set to the slug representing the region where the volume is located | - - Volumes may only be resized upwards. The maximum size for a volume is 16TiB. - - Args: - droplet_id: The unique identifier for the Droplet the volume will be attached or detached - from. - - type: The volume action to initiate. - - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - region: The slug identifier for the region where the resource will initially be - available. - - tags: A flat array of tag names as strings to be applied to the resource. Tag names - may be for either existing or new tags. 
- - Requires `tag:create` scope. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - def initiate_by_id( - self, - volume_id: str, - *, - droplet_id: int, - type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateByIDResponse: - """ - To initiate an action on a block storage volume by Id, send a POST request to - `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate - attributes for the respective action. - - ## Attach a Block Storage Volume to a Droplet - - | Attribute | Details | - | ---------- | ------------------------------------------------------------------- | - | type | This must be `attach` | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - Each volume may only be attached to a single Droplet. However, up to fifteen - volumes may be attached to a Droplet at a time. Pre-formatted volumes will be - automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS - Droplets created on or after April 26, 2018 when attached. 
On older Droplets, - [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) - is required. - - ## Remove a Block Storage Volume from a Droplet - - | Attribute | Details | - | ---------- | ------------------------------------------------------------------- | - | type | This must be `detach` | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - ## Resize a Volume - - | Attribute | Details | - | -------------- | ------------------------------------------------------------------- | - | type | This must be `resize` | - | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | - | region | Set to the slug representing the region where the volume is located | - - Volumes may only be resized upwards. The maximum size for a volume is 16TiB. - - Args: - droplet_id: The unique identifier for the Droplet the volume will be attached or detached - from. - - type: The volume action to initiate. - - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - region: The slug identifier for the region where the resource will initially be - available. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @overload - def initiate_by_id( - self, - volume_id: str, - *, - size_gigabytes: int, - type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateByIDResponse: - """ - To initiate an action on a block storage volume by Id, send a POST request to - `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate - attributes for the respective action. - - ## Attach a Block Storage Volume to a Droplet - - | Attribute | Details | - | ---------- | ------------------------------------------------------------------- | - | type | This must be `attach` | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - Each volume may only be attached to a single Droplet. However, up to fifteen - volumes may be attached to a Droplet at a time. Pre-formatted volumes will be - automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS - Droplets created on or after April 26, 2018 when attached. On older Droplets, - [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) - is required. 
- - ## Remove a Block Storage Volume from a Droplet - - | Attribute | Details | - | ---------- | ------------------------------------------------------------------- | - | type | This must be `detach` | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - ## Resize a Volume - - | Attribute | Details | - | -------------- | ------------------------------------------------------------------- | - | type | This must be `resize` | - | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | - | region | Set to the slug representing the region where the volume is located | - - Volumes may only be resized upwards. The maximum size for a volume is 16TiB. - - Args: - size_gigabytes: The new size of the block storage volume in GiB (1024^3). - - type: The volume action to initiate. - - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - region: The slug identifier for the region where the resource will initially be - available. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @required_args(["droplet_id", "type"], ["size_gigabytes", "type"]) - def initiate_by_id( - self, - volume_id: str, - *, - droplet_id: int | NotGiven = NOT_GIVEN, - type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - size_gigabytes: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateByIDResponse: - if not volume_id: - raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") - return self._post( - f"/v2/volumes/{volume_id}/actions" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions", - body=maybe_transform( - { - "droplet_id": droplet_id, - "type": type, - "region": region, - "tags": tags, - "size_gigabytes": size_gigabytes, - }, - action_initiate_by_id_params.ActionInitiateByIDParams, - ), - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - action_initiate_by_id_params.ActionInitiateByIDParams, - ), - ), - cast_to=ActionInitiateByIDResponse, - ) - - @overload - def initiate_by_name( - self, - *, - droplet_id: int, - type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int 
| NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateByNameResponse: - """ - To initiate an action on a block storage volume by Name, send a POST request to - `~/v2/volumes/actions`. The body should contain the appropriate attributes for - the respective action. - - ## Attach a Block Storage Volume to a Droplet - - | Attribute | Details | - | ----------- | ------------------------------------------------------------------- | - | type | This must be `attach` | - | volume_name | The name of the block storage volume | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - Each volume may only be attached to a single Droplet. However, up to fifteen - volumes may be attached to a Droplet at a time. Pre-formatted volumes will be - automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS - Droplets created on or after April 26, 2018 when attached. On older Droplets, - [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) - is required. 
- - ## Remove a Block Storage Volume from a Droplet - - | Attribute | Details | - | ----------- | ------------------------------------------------------------------- | - | type | This must be `detach` | - | volume_name | The name of the block storage volume | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - Args: - droplet_id: The unique identifier for the Droplet the volume will be attached or detached - from. - - type: The volume action to initiate. - - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - region: The slug identifier for the region where the resource will initially be - available. - - tags: A flat array of tag names as strings to be applied to the resource. Tag names - may be for either existing or new tags. - - Requires `tag:create` scope. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - def initiate_by_name( - self, - *, - droplet_id: int, - type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateByNameResponse: - """ - To initiate an action on a block storage volume by Name, send a POST request to - `~/v2/volumes/actions`. The body should contain the appropriate attributes for - the respective action. - - ## Attach a Block Storage Volume to a Droplet - - | Attribute | Details | - | ----------- | ------------------------------------------------------------------- | - | type | This must be `attach` | - | volume_name | The name of the block storage volume | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - Each volume may only be attached to a single Droplet. However, up to fifteen - volumes may be attached to a Droplet at a time. Pre-formatted volumes will be - automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS - Droplets created on or after April 26, 2018 when attached. On older Droplets, - [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) - is required. - - ## Remove a Block Storage Volume from a Droplet - - | Attribute | Details | - | ----------- | ------------------------------------------------------------------- | - | type | This must be `detach` | - | volume_name | The name of the block storage volume | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - Args: - droplet_id: The unique identifier for the Droplet the volume will be attached or detached - from. - - type: The volume action to initiate. - - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - region: The slug identifier for the region where the resource will initially be - available. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["droplet_id", "type"]) - def initiate_by_name( - self, - *, - droplet_id: int, - type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateByNameResponse: - return self._post( - "/v2/volumes/actions" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/volumes/actions", - body=maybe_transform( - { - "droplet_id": droplet_id, - "type": type, - "region": region, - "tags": tags, - }, - action_initiate_by_name_params.ActionInitiateByNameParams, - ), - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - action_initiate_by_name_params.ActionInitiateByNameParams, - ), - ), - cast_to=ActionInitiateByNameResponse, - ) - - -class AsyncActionsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncActionsResourceWithRawResponse: - """ - This property can be used as a 
prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncActionsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncActionsResourceWithStreamingResponse(self) - - async def retrieve( - self, - action_id: int, - *, - volume_id: str, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionRetrieveResponse: - """ - To retrieve the status of a volume action, send a GET request to - `/v2/volumes/$VOLUME_ID/actions/$ACTION_ID`. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not volume_id: - raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") - return await self._get( - f"/v2/volumes/{volume_id}/actions/{action_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions/{action_id}", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - action_retrieve_params.ActionRetrieveParams, - ), - ), - cast_to=ActionRetrieveResponse, - ) - - async def list( - self, - volume_id: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionListResponse: - """ - To retrieve all actions that have been executed on a volume, send a GET request - to `/v2/volumes/$VOLUME_ID/actions`. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not volume_id: - raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") - return await self._get( - f"/v2/volumes/{volume_id}/actions" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - action_list_params.ActionListParams, - ), - ), - cast_to=ActionListResponse, - ) - - @overload - async def initiate_by_id( - self, - volume_id: str, - *, - droplet_id: int, - type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateByIDResponse: - """ - To initiate an action on a block storage volume by Id, send a POST request to - `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate - attributes for the respective action. 
- - ## Attach a Block Storage Volume to a Droplet - - | Attribute | Details | - | ---------- | ------------------------------------------------------------------- | - | type | This must be `attach` | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - Each volume may only be attached to a single Droplet. However, up to fifteen - volumes may be attached to a Droplet at a time. Pre-formatted volumes will be - automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS - Droplets created on or after April 26, 2018 when attached. On older Droplets, - [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) - is required. - - ## Remove a Block Storage Volume from a Droplet - - | Attribute | Details | - | ---------- | ------------------------------------------------------------------- | - | type | This must be `detach` | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - ## Resize a Volume - - | Attribute | Details | - | -------------- | ------------------------------------------------------------------- | - | type | This must be `resize` | - | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | - | region | Set to the slug representing the region where the volume is located | - - Volumes may only be resized upwards. The maximum size for a volume is 16TiB. - - Args: - droplet_id: The unique identifier for the Droplet the volume will be attached or detached - from. - - type: The volume action to initiate. - - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - region: The slug identifier for the region where the resource will initially be - available. - - tags: A flat array of tag names as strings to be applied to the resource. Tag names - may be for either existing or new tags. 
- - Requires `tag:create` scope. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - async def initiate_by_id( - self, - volume_id: str, - *, - droplet_id: int, - type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateByIDResponse: - """ - To initiate an action on a block storage volume by Id, send a POST request to - `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate - attributes for the respective action. - - ## Attach a Block Storage Volume to a Droplet - - | Attribute | Details | - | ---------- | ------------------------------------------------------------------- | - | type | This must be `attach` | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - Each volume may only be attached to a single Droplet. However, up to fifteen - volumes may be attached to a Droplet at a time. Pre-formatted volumes will be - automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS - Droplets created on or after April 26, 2018 when attached. 
On older Droplets, - [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) - is required. - - ## Remove a Block Storage Volume from a Droplet - - | Attribute | Details | - | ---------- | ------------------------------------------------------------------- | - | type | This must be `detach` | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - ## Resize a Volume - - | Attribute | Details | - | -------------- | ------------------------------------------------------------------- | - | type | This must be `resize` | - | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | - | region | Set to the slug representing the region where the volume is located | - - Volumes may only be resized upwards. The maximum size for a volume is 16TiB. - - Args: - droplet_id: The unique identifier for the Droplet the volume will be attached or detached - from. - - type: The volume action to initiate. - - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - region: The slug identifier for the region where the resource will initially be - available. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @overload - async def initiate_by_id( - self, - volume_id: str, - *, - size_gigabytes: int, - type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateByIDResponse: - """ - To initiate an action on a block storage volume by Id, send a POST request to - `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate - attributes for the respective action. - - ## Attach a Block Storage Volume to a Droplet - - | Attribute | Details | - | ---------- | ------------------------------------------------------------------- | - | type | This must be `attach` | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - Each volume may only be attached to a single Droplet. However, up to fifteen - volumes may be attached to a Droplet at a time. Pre-formatted volumes will be - automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS - Droplets created on or after April 26, 2018 when attached. On older Droplets, - [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) - is required. 
- - ## Remove a Block Storage Volume from a Droplet - - | Attribute | Details | - | ---------- | ------------------------------------------------------------------- | - | type | This must be `detach` | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - ## Resize a Volume - - | Attribute | Details | - | -------------- | ------------------------------------------------------------------- | - | type | This must be `resize` | - | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | - | region | Set to the slug representing the region where the volume is located | - - Volumes may only be resized upwards. The maximum size for a volume is 16TiB. - - Args: - size_gigabytes: The new size of the block storage volume in GiB (1024^3). - - type: The volume action to initiate. - - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - region: The slug identifier for the region where the resource will initially be - available. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @required_args(["droplet_id", "type"], ["size_gigabytes", "type"]) - async def initiate_by_id( - self, - volume_id: str, - *, - droplet_id: int | NotGiven = NOT_GIVEN, - type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - size_gigabytes: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateByIDResponse: - if not volume_id: - raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") - return await self._post( - f"/v2/volumes/{volume_id}/actions" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions", - body=await async_maybe_transform( - { - "droplet_id": droplet_id, - "type": type, - "region": region, - "tags": tags, - "size_gigabytes": size_gigabytes, - }, - action_initiate_by_id_params.ActionInitiateByIDParams, - ), - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - action_initiate_by_id_params.ActionInitiateByIDParams, - ), - ), - cast_to=ActionInitiateByIDResponse, - ) - - @overload - async def initiate_by_name( - self, - *, - droplet_id: int, - type: Literal["attach", "detach", "resize"], - page: 
int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateByNameResponse: - """ - To initiate an action on a block storage volume by Name, send a POST request to - `~/v2/volumes/actions`. The body should contain the appropriate attributes for - the respective action. - - ## Attach a Block Storage Volume to a Droplet - - | Attribute | Details | - | ----------- | ------------------------------------------------------------------- | - | type | This must be `attach` | - | volume_name | The name of the block storage volume | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - Each volume may only be attached to a single Droplet. However, up to fifteen - volumes may be attached to a Droplet at a time. Pre-formatted volumes will be - automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS - Droplets created on or after April 26, 2018 when attached. On older Droplets, - [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) - is required. 
- - ## Remove a Block Storage Volume from a Droplet - - | Attribute | Details | - | ----------- | ------------------------------------------------------------------- | - | type | This must be `detach` | - | volume_name | The name of the block storage volume | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - Args: - droplet_id: The unique identifier for the Droplet the volume will be attached or detached - from. - - type: The volume action to initiate. - - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - region: The slug identifier for the region where the resource will initially be - available. - - tags: A flat array of tag names as strings to be applied to the resource. Tag names - may be for either existing or new tags. - - Requires `tag:create` scope. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - async def initiate_by_name( - self, - *, - droplet_id: int, - type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateByNameResponse: - """ - To initiate an action on a block storage volume by Name, send a POST request to - `~/v2/volumes/actions`. The body should contain the appropriate attributes for - the respective action. - - ## Attach a Block Storage Volume to a Droplet - - | Attribute | Details | - | ----------- | ------------------------------------------------------------------- | - | type | This must be `attach` | - | volume_name | The name of the block storage volume | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - Each volume may only be attached to a single Droplet. However, up to fifteen - volumes may be attached to a Droplet at a time. Pre-formatted volumes will be - automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS - Droplets created on or after April 26, 2018 when attached. On older Droplets, - [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) - is required. - - ## Remove a Block Storage Volume from a Droplet - - | Attribute | Details | - | ----------- | ------------------------------------------------------------------- | - | type | This must be `detach` | - | volume_name | The name of the block storage volume | - | droplet_id | Set to the Droplet's ID | - | region | Set to the slug representing the region where the volume is located | - - Args: - droplet_id: The unique identifier for the Droplet the volume will be attached or detached - from. - - type: The volume action to initiate. - - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - region: The slug identifier for the region where the resource will initially be - available. 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["droplet_id", "type"]) - async def initiate_by_name( - self, - *, - droplet_id: int, - type: Literal["attach", "detach", "resize"], - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ActionInitiateByNameResponse: - return await self._post( - "/v2/volumes/actions" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/volumes/actions", - body=await async_maybe_transform( - { - "droplet_id": droplet_id, - "type": type, - "region": region, - "tags": tags, - }, - action_initiate_by_name_params.ActionInitiateByNameParams, - ), - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - action_initiate_by_name_params.ActionInitiateByNameParams, - ), - ), - cast_to=ActionInitiateByNameResponse, - ) - - -class ActionsResourceWithRawResponse: - def __init__(self, actions: ActionsResource) -> None: - self._actions = actions - - self.retrieve = 
to_raw_response_wrapper( - actions.retrieve, - ) - self.list = to_raw_response_wrapper( - actions.list, - ) - self.initiate_by_id = to_raw_response_wrapper( - actions.initiate_by_id, - ) - self.initiate_by_name = to_raw_response_wrapper( - actions.initiate_by_name, - ) - - -class AsyncActionsResourceWithRawResponse: - def __init__(self, actions: AsyncActionsResource) -> None: - self._actions = actions - - self.retrieve = async_to_raw_response_wrapper( - actions.retrieve, - ) - self.list = async_to_raw_response_wrapper( - actions.list, - ) - self.initiate_by_id = async_to_raw_response_wrapper( - actions.initiate_by_id, - ) - self.initiate_by_name = async_to_raw_response_wrapper( - actions.initiate_by_name, - ) - - -class ActionsResourceWithStreamingResponse: - def __init__(self, actions: ActionsResource) -> None: - self._actions = actions - - self.retrieve = to_streamed_response_wrapper( - actions.retrieve, - ) - self.list = to_streamed_response_wrapper( - actions.list, - ) - self.initiate_by_id = to_streamed_response_wrapper( - actions.initiate_by_id, - ) - self.initiate_by_name = to_streamed_response_wrapper( - actions.initiate_by_name, - ) - - -class AsyncActionsResourceWithStreamingResponse: - def __init__(self, actions: AsyncActionsResource) -> None: - self._actions = actions - - self.retrieve = async_to_streamed_response_wrapper( - actions.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - actions.list, - ) - self.initiate_by_id = async_to_streamed_response_wrapper( - actions.initiate_by_id, - ) - self.initiate_by_name = async_to_streamed_response_wrapper( - actions.initiate_by_name, - ) diff --git a/src/gradientai/resources/volumes/snapshots.py b/src/gradientai/resources/volumes/snapshots.py deleted file mode 100644 index 9bb50070..00000000 --- a/src/gradientai/resources/volumes/snapshots.py +++ /dev/null @@ -1,499 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import List, Optional - -import httpx - -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from ..._utils import maybe_transform, async_maybe_transform -from ..._compat import cached_property -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import make_request_options -from ...types.volumes import snapshot_list_params, snapshot_create_params -from ...types.volumes.snapshot_list_response import SnapshotListResponse -from ...types.volumes.snapshot_create_response import SnapshotCreateResponse -from ...types.volumes.snapshot_retrieve_response import SnapshotRetrieveResponse - -__all__ = ["SnapshotsResource", "AsyncSnapshotsResource"] - - -class SnapshotsResource(SyncAPIResource): - @cached_property - def with_raw_response(self) -> SnapshotsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return SnapshotsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> SnapshotsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return SnapshotsResourceWithStreamingResponse(self) - - def create( - self, - volume_id: str, - *, - name: str, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
- # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SnapshotCreateResponse: - """ - To create a snapshot from a volume, sent a POST request to - `/v2/volumes/$VOLUME_ID/snapshots`. - - Args: - name: A human-readable name for the volume snapshot. - - tags: A flat array of tag names as strings to be applied to the resource. Tag names - may be for either existing or new tags. - - Requires `tag:create` scope. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not volume_id: - raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") - return self._post( - f"/v2/volumes/{volume_id}/snapshots" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/{volume_id}/snapshots", - body=maybe_transform( - { - "name": name, - "tags": tags, - }, - snapshot_create_params.SnapshotCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=SnapshotCreateResponse, - ) - - def retrieve( - self, - snapshot_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SnapshotRetrieveResponse: - """ - To retrieve the details of a snapshot that has been created from a volume, send - a GET request to `/v2/volumes/snapshots/$VOLUME_SNAPSHOT_ID`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not snapshot_id: - raise ValueError(f"Expected a non-empty value for `snapshot_id` but received {snapshot_id!r}") - return self._get( - f"/v2/volumes/snapshots/{snapshot_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/snapshots/{snapshot_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=SnapshotRetrieveResponse, - ) - - def list( - self, - volume_id: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SnapshotListResponse: - """ - To retrieve the snapshots that have been created from a volume, send a GET - request to `/v2/volumes/$VOLUME_ID/snapshots`. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not volume_id: - raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") - return self._get( - f"/v2/volumes/{volume_id}/snapshots" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/{volume_id}/snapshots", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "page": page, - "per_page": per_page, - }, - snapshot_list_params.SnapshotListParams, - ), - ), - cast_to=SnapshotListResponse, - ) - - def delete( - self, - snapshot_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete a volume snapshot, send a DELETE request to - `/v2/volumes/snapshots/$VOLUME_SNAPSHOT_ID`. - - A status of 204 will be given. This indicates that the request was processed - successfully, but that no response body is needed. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not snapshot_id: - raise ValueError(f"Expected a non-empty value for `snapshot_id` but received {snapshot_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/v2/volumes/snapshots/{snapshot_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/snapshots/{snapshot_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class AsyncSnapshotsResource(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncSnapshotsResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncSnapshotsResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncSnapshotsResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncSnapshotsResourceWithStreamingResponse(self) - - async def create( - self, - volume_id: str, - *, - name: str, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SnapshotCreateResponse: - """ - To create a snapshot from a volume, sent a POST request to - `/v2/volumes/$VOLUME_ID/snapshots`. - - Args: - name: A human-readable name for the volume snapshot. - - tags: A flat array of tag names as strings to be applied to the resource. Tag names - may be for either existing or new tags. - - Requires `tag:create` scope. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not volume_id: - raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") - return await self._post( - f"/v2/volumes/{volume_id}/snapshots" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/{volume_id}/snapshots", - body=await async_maybe_transform( - { - "name": name, - "tags": tags, - }, - snapshot_create_params.SnapshotCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=SnapshotCreateResponse, - ) - - async def retrieve( - self, - snapshot_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SnapshotRetrieveResponse: - """ - To retrieve the details of a snapshot that has been created from a volume, send - a GET request to `/v2/volumes/snapshots/$VOLUME_SNAPSHOT_ID`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not snapshot_id: - raise ValueError(f"Expected a non-empty value for `snapshot_id` but received {snapshot_id!r}") - return await self._get( - f"/v2/volumes/snapshots/{snapshot_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/snapshots/{snapshot_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=SnapshotRetrieveResponse, - ) - - async def list( - self, - volume_id: str, - *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> SnapshotListResponse: - """ - To retrieve the snapshots that have been created from a volume, send a GET - request to `/v2/volumes/$VOLUME_ID/snapshots`. - - Args: - page: Which 'page' of paginated results to return. 
- - per_page: Number of items returned per page - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not volume_id: - raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") - return await self._get( - f"/v2/volumes/{volume_id}/snapshots" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/{volume_id}/snapshots", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - }, - snapshot_list_params.SnapshotListParams, - ), - ), - cast_to=SnapshotListResponse, - ) - - async def delete( - self, - snapshot_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete a volume snapshot, send a DELETE request to - `/v2/volumes/snapshots/$VOLUME_SNAPSHOT_ID`. - - A status of 204 will be given. This indicates that the request was processed - successfully, but that no response body is needed. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not snapshot_id: - raise ValueError(f"Expected a non-empty value for `snapshot_id` but received {snapshot_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/v2/volumes/snapshots/{snapshot_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/snapshots/{snapshot_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - -class SnapshotsResourceWithRawResponse: - def __init__(self, snapshots: SnapshotsResource) -> None: - self._snapshots = snapshots - - self.create = to_raw_response_wrapper( - snapshots.create, - ) - self.retrieve = to_raw_response_wrapper( - snapshots.retrieve, - ) - self.list = to_raw_response_wrapper( - snapshots.list, - ) - self.delete = to_raw_response_wrapper( - snapshots.delete, - ) - - -class AsyncSnapshotsResourceWithRawResponse: - def __init__(self, snapshots: AsyncSnapshotsResource) -> None: - self._snapshots = snapshots - - self.create = async_to_raw_response_wrapper( - snapshots.create, - ) - self.retrieve = async_to_raw_response_wrapper( - snapshots.retrieve, - ) - self.list = async_to_raw_response_wrapper( - snapshots.list, - ) - self.delete = async_to_raw_response_wrapper( - snapshots.delete, - ) - - -class SnapshotsResourceWithStreamingResponse: - def __init__(self, snapshots: SnapshotsResource) -> None: - self._snapshots = snapshots - - self.create = to_streamed_response_wrapper( - snapshots.create, - ) - self.retrieve = to_streamed_response_wrapper( - snapshots.retrieve, - ) - self.list = to_streamed_response_wrapper( - snapshots.list, - ) - self.delete = 
to_streamed_response_wrapper( - snapshots.delete, - ) - - -class AsyncSnapshotsResourceWithStreamingResponse: - def __init__(self, snapshots: AsyncSnapshotsResource) -> None: - self._snapshots = snapshots - - self.create = async_to_streamed_response_wrapper( - snapshots.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - snapshots.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - snapshots.list, - ) - self.delete = async_to_streamed_response_wrapper( - snapshots.delete, - ) diff --git a/src/gradientai/resources/volumes/volumes.py b/src/gradientai/resources/volumes/volumes.py deleted file mode 100644 index 04df1bce..00000000 --- a/src/gradientai/resources/volumes/volumes.py +++ /dev/null @@ -1,1144 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Optional -from typing_extensions import Literal, overload - -import httpx - -from ...types import volume_list_params, volume_create_params, volume_delete_by_name_params -from .actions import ( - ActionsResource, - AsyncActionsResource, - ActionsResourceWithRawResponse, - AsyncActionsResourceWithRawResponse, - ActionsResourceWithStreamingResponse, - AsyncActionsResourceWithStreamingResponse, -) -from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven -from ..._utils import required_args, maybe_transform, async_maybe_transform -from ..._compat import cached_property -from .snapshots import ( - SnapshotsResource, - AsyncSnapshotsResource, - SnapshotsResourceWithRawResponse, - AsyncSnapshotsResourceWithRawResponse, - SnapshotsResourceWithStreamingResponse, - AsyncSnapshotsResourceWithStreamingResponse, -) -from ..._resource import SyncAPIResource, AsyncAPIResource -from ..._response import ( - to_raw_response_wrapper, - to_streamed_response_wrapper, - async_to_raw_response_wrapper, - async_to_streamed_response_wrapper, -) -from ..._base_client import 
make_request_options -from ...types.volume_list_response import VolumeListResponse -from ...types.volume_create_response import VolumeCreateResponse -from ...types.volume_retrieve_response import VolumeRetrieveResponse - -__all__ = ["VolumesResource", "AsyncVolumesResource"] - - -class VolumesResource(SyncAPIResource): - @cached_property - def actions(self) -> ActionsResource: - return ActionsResource(self._client) - - @cached_property - def snapshots(self) -> SnapshotsResource: - return SnapshotsResource(self._client) - - @cached_property - def with_raw_response(self) -> VolumesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return VolumesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> VolumesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return VolumesResourceWithStreamingResponse(self) - - @overload - def create( - self, - *, - name: str, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ], - size_gigabytes: int, - description: str | NotGiven = NOT_GIVEN, - filesystem_label: str | NotGiven = NOT_GIVEN, - filesystem_type: str | NotGiven = NOT_GIVEN, - snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VolumeCreateResponse: - """To create a new volume, send a POST request to `/v2/volumes`. - - Optionally, a - `filesystem_type` attribute may be provided in order to automatically format the - volume's filesystem. Pre-formatted volumes are automatically mounted when - attached to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS Droplets created - on or after April 26, 2018. Attaching pre-formatted volumes to Droplets without - support for auto-mounting is not recommended. - - Args: - name: A human-readable name for the block storage volume. Must be lowercase and be - composed only of numbers, letters and "-", up to a limit of 64 characters. The - name must begin with a letter. - - region: The slug identifier for the region where the resource will initially be - available. - - size_gigabytes: The size of the block storage volume in GiB (1024^3). This field does not apply - when creating a volume from a snapshot. - - description: An optional free-form text field to describe a block storage volume. - - filesystem_label: The label applied to the filesystem. Labels for ext4 type filesystems may - contain 16 characters while labels for xfs type filesystems are limited to 12 - characters. May only be used in conjunction with filesystem_type. - - filesystem_type: The name of the filesystem type to be used on the volume. When provided, the - volume will automatically be formatted to the specified filesystem type. - Currently, the available options are `ext4` and `xfs`. Pre-formatted volumes are - automatically mounted when attached to Ubuntu, Debian, Fedora, Fedora Atomic, - and CentOS Droplets created on or after April 26, 2018. Attaching pre-formatted - volumes to other Droplets is not recommended. - - snapshot_id: The unique identifier for the volume snapshot from which to create the volume. 
- - tags: A flat array of tag names as strings to be applied to the resource. Tag names - may be for either existing or new tags. - - Requires `tag:create` scope. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @overload - def create( - self, - *, - name: str, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ], - size_gigabytes: int, - description: str | NotGiven = NOT_GIVEN, - filesystem_label: str | NotGiven = NOT_GIVEN, - filesystem_type: str | NotGiven = NOT_GIVEN, - snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VolumeCreateResponse: - """To create a new volume, send a POST request to `/v2/volumes`. - - Optionally, a - `filesystem_type` attribute may be provided in order to automatically format the - volume's filesystem. Pre-formatted volumes are automatically mounted when - attached to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS Droplets created - on or after April 26, 2018. Attaching pre-formatted volumes to Droplets without - support for auto-mounting is not recommended. - - Args: - name: A human-readable name for the block storage volume. Must be lowercase and be - composed only of numbers, letters and "-", up to a limit of 64 characters. 
The - name must begin with a letter. - - region: The slug identifier for the region where the resource will initially be - available. - - size_gigabytes: The size of the block storage volume in GiB (1024^3). This field does not apply - when creating a volume from a snapshot. - - description: An optional free-form text field to describe a block storage volume. - - filesystem_label: The label applied to the filesystem. Labels for ext4 type filesystems may - contain 16 characters while labels for xfs type filesystems are limited to 12 - characters. May only be used in conjunction with filesystem_type. - - filesystem_type: The name of the filesystem type to be used on the volume. When provided, the - volume will automatically be formatted to the specified filesystem type. - Currently, the available options are `ext4` and `xfs`. Pre-formatted volumes are - automatically mounted when attached to Ubuntu, Debian, Fedora, Fedora Atomic, - and CentOS Droplets created on or after April 26, 2018. Attaching pre-formatted - volumes to other Droplets is not recommended. - - snapshot_id: The unique identifier for the volume snapshot from which to create the volume. - - tags: A flat array of tag names as strings to be applied to the resource. Tag names - may be for either existing or new tags. - - Requires `tag:create` scope. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @required_args(["name", "region", "size_gigabytes"]) - def create( - self, - *, - name: str, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ], - size_gigabytes: int, - description: str | NotGiven = NOT_GIVEN, - filesystem_label: str | NotGiven = NOT_GIVEN, - filesystem_type: str | NotGiven = NOT_GIVEN, - snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VolumeCreateResponse: - return self._post( - "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes", - body=maybe_transform( - { - "name": name, - "region": region, - "size_gigabytes": size_gigabytes, - "description": description, - "filesystem_label": filesystem_label, - "filesystem_type": filesystem_type, - "snapshot_id": snapshot_id, - "tags": tags, - }, - volume_create_params.VolumeCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VolumeCreateResponse, - ) - - def retrieve( - self, - volume_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VolumeRetrieveResponse: - """ - To show information about a block storage volume, send a GET request to - `/v2/volumes/$VOLUME_ID`. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not volume_id: - raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") - return self._get( - f"/v2/volumes/{volume_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/{volume_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VolumeRetrieveResponse, - ) - - def list( - self, - *, - name: str | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VolumeListResponse: - """ - To list all of the block storage volumes available on your account, send a GET - request to `/v2/volumes`. 
- - ## Filtering Results - - ### By Region - - The `region` may be provided as query parameter in order to restrict results to - volumes available in a specific region. For example: `/v2/volumes?region=nyc1` - - ### By Name - - It is also possible to list volumes on your account that match a specified name. - To do so, send a GET request with the volume's name as a query parameter to - `/v2/volumes?name=$VOLUME_NAME`. **Note:** You can only create one volume per - region with the same name. - - ### By Name and Region - - It is also possible to retrieve information about a block storage volume by - name. To do so, send a GET request with the volume's name and the region slug - for the region it is located in as query parameters to - `/v2/volumes?name=$VOLUME_NAME®ion=nyc1`. - - Args: - name: The block storage volume's name. - - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - region: The slug identifier for the region where the resource is available. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return self._get( - "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "name": name, - "page": page, - "per_page": per_page, - "region": region, - }, - volume_list_params.VolumeListParams, - ), - ), - cast_to=VolumeListResponse, - ) - - def delete( - self, - volume_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete a block storage volume, destroying all data and removing it from your - account, send a DELETE request to `/v2/volumes/$VOLUME_ID`. No response body - will be sent back, but the response code will indicate success. Specifically, - the response code will be a 204, which means that the action was successful with - no returned body data. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not volume_id: - raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - f"/v2/volumes/{volume_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/{volume_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - def delete_by_name( - self, - *, - name: str | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - Block storage volumes may also be deleted by name by sending a DELETE request - with the volume's **name** and the **region slug** for the region it is located - in as query parameters to `/v2/volumes?name=$VOLUME_NAME®ion=nyc1`. No - response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - name: The block storage volume's name. - - region: The slug identifier for the region where the resource is available. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return self._delete( - "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=maybe_transform( - { - "name": name, - "region": region, - }, - volume_delete_by_name_params.VolumeDeleteByNameParams, - ), - ), - cast_to=NoneType, - ) - - -class AsyncVolumesResource(AsyncAPIResource): - @cached_property - def actions(self) -> AsyncActionsResource: - return AsyncActionsResource(self._client) - - @cached_property - def snapshots(self) -> AsyncSnapshotsResource: - return AsyncSnapshotsResource(self._client) - - @cached_property - def with_raw_response(self) -> AsyncVolumesResourceWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. 
- - For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers - """ - return AsyncVolumesResourceWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncVolumesResourceWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response - """ - return AsyncVolumesResourceWithStreamingResponse(self) - - @overload - async def create( - self, - *, - name: str, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ], - size_gigabytes: int, - description: str | NotGiven = NOT_GIVEN, - filesystem_label: str | NotGiven = NOT_GIVEN, - filesystem_type: str | NotGiven = NOT_GIVEN, - snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VolumeCreateResponse: - """To create a new volume, send a POST request to `/v2/volumes`. - - Optionally, a - `filesystem_type` attribute may be provided in order to automatically format the - volume's filesystem. Pre-formatted volumes are automatically mounted when - attached to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS Droplets created - on or after April 26, 2018. Attaching pre-formatted volumes to Droplets without - support for auto-mounting is not recommended. - - Args: - name: A human-readable name for the block storage volume. 
Must be lowercase and be - composed only of numbers, letters and "-", up to a limit of 64 characters. The - name must begin with a letter. - - region: The slug identifier for the region where the resource will initially be - available. - - size_gigabytes: The size of the block storage volume in GiB (1024^3). This field does not apply - when creating a volume from a snapshot. - - description: An optional free-form text field to describe a block storage volume. - - filesystem_label: The label applied to the filesystem. Labels for ext4 type filesystems may - contain 16 characters while labels for xfs type filesystems are limited to 12 - characters. May only be used in conjunction with filesystem_type. - - filesystem_type: The name of the filesystem type to be used on the volume. When provided, the - volume will automatically be formatted to the specified filesystem type. - Currently, the available options are `ext4` and `xfs`. Pre-formatted volumes are - automatically mounted when attached to Ubuntu, Debian, Fedora, Fedora Atomic, - and CentOS Droplets created on or after April 26, 2018. Attaching pre-formatted - volumes to other Droplets is not recommended. - - snapshot_id: The unique identifier for the volume snapshot from which to create the volume. - - tags: A flat array of tag names as strings to be applied to the resource. Tag names - may be for either existing or new tags. - - Requires `tag:create` scope. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... 
- - @overload - async def create( - self, - *, - name: str, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ], - size_gigabytes: int, - description: str | NotGiven = NOT_GIVEN, - filesystem_label: str | NotGiven = NOT_GIVEN, - filesystem_type: str | NotGiven = NOT_GIVEN, - snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VolumeCreateResponse: - """To create a new volume, send a POST request to `/v2/volumes`. - - Optionally, a - `filesystem_type` attribute may be provided in order to automatically format the - volume's filesystem. Pre-formatted volumes are automatically mounted when - attached to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS Droplets created - on or after April 26, 2018. Attaching pre-formatted volumes to Droplets without - support for auto-mounting is not recommended. - - Args: - name: A human-readable name for the block storage volume. Must be lowercase and be - composed only of numbers, letters and "-", up to a limit of 64 characters. The - name must begin with a letter. - - region: The slug identifier for the region where the resource will initially be - available. - - size_gigabytes: The size of the block storage volume in GiB (1024^3). This field does not apply - when creating a volume from a snapshot. - - description: An optional free-form text field to describe a block storage volume. - - filesystem_label: The label applied to the filesystem. 
Labels for ext4 type filesystems may - contain 16 characters while labels for xfs type filesystems are limited to 12 - characters. May only be used in conjunction with filesystem_type. - - filesystem_type: The name of the filesystem type to be used on the volume. When provided, the - volume will automatically be formatted to the specified filesystem type. - Currently, the available options are `ext4` and `xfs`. Pre-formatted volumes are - automatically mounted when attached to Ubuntu, Debian, Fedora, Fedora Atomic, - and CentOS Droplets created on or after April 26, 2018. Attaching pre-formatted - volumes to other Droplets is not recommended. - - snapshot_id: The unique identifier for the volume snapshot from which to create the volume. - - tags: A flat array of tag names as strings to be applied to the resource. Tag names - may be for either existing or new tags. - - Requires `tag:create` scope. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - ... - - @required_args(["name", "region", "size_gigabytes"]) - async def create( - self, - *, - name: str, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ], - size_gigabytes: int, - description: str | NotGiven = NOT_GIVEN, - filesystem_label: str | NotGiven = NOT_GIVEN, - filesystem_type: str | NotGiven = NOT_GIVEN, - snapshot_id: str | NotGiven = NOT_GIVEN, - tags: Optional[List[str]] | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VolumeCreateResponse: - return await self._post( - "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes", - body=await async_maybe_transform( - { - "name": name, - "region": region, - "size_gigabytes": size_gigabytes, - "description": description, - "filesystem_label": filesystem_label, - "filesystem_type": filesystem_type, - "snapshot_id": snapshot_id, - "tags": tags, - }, - volume_create_params.VolumeCreateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VolumeCreateResponse, - ) - - async def retrieve( - self, - volume_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VolumeRetrieveResponse: - """ - To show information about a block storage volume, send a GET request to - `/v2/volumes/$VOLUME_ID`. 
- - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not volume_id: - raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") - return await self._get( - f"/v2/volumes/{volume_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/{volume_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=VolumeRetrieveResponse, - ) - - async def list( - self, - *, - name: str | NotGiven = NOT_GIVEN, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> VolumeListResponse: - """ - To list all of the block storage volumes available on your account, send a GET - request to `/v2/volumes`. - - ## Filtering Results - - ### By Region - - The `region` may be provided as query parameter in order to restrict results to - volumes available in a specific region. For example: `/v2/volumes?region=nyc1` - - ### By Name - - It is also possible to list volumes on your account that match a specified name. 
- To do so, send a GET request with the volume's name as a query parameter to - `/v2/volumes?name=$VOLUME_NAME`. **Note:** You can only create one volume per - region with the same name. - - ### By Name and Region - - It is also possible to retrieve information about a block storage volume by - name. To do so, send a GET request with the volume's name and the region slug - for the region it is located in as query parameters to - `/v2/volumes?name=$VOLUME_NAME®ion=nyc1`. - - Args: - name: The block storage volume's name. - - page: Which 'page' of paginated results to return. - - per_page: Number of items returned per page - - region: The slug identifier for the region where the resource is available. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - return await self._get( - "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "name": name, - "page": page, - "per_page": per_page, - "region": region, - }, - volume_list_params.VolumeListParams, - ), - ), - cast_to=VolumeListResponse, - ) - - async def delete( - self, - volume_id: str, - *, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - To delete a block storage volume, destroying all data and removing it from your - account, send a DELETE request to `/v2/volumes/$VOLUME_ID`. No response body - will be sent back, but the response code will indicate success. Specifically, - the response code will be a 204, which means that the action was successful with - no returned body data. - - Args: - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - if not volume_id: - raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - f"/v2/volumes/{volume_id}" - if self._client._base_url_overridden - else f"https://api.digitalocean.com/v2/volumes/{volume_id}", - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=NoneType, - ) - - async def delete_by_name( - self, - *, - name: str | NotGiven = NOT_GIVEN, - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. 
- extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> None: - """ - Block storage volumes may also be deleted by name by sending a DELETE request - with the volume's **name** and the **region slug** for the region it is located - in as query parameters to `/v2/volumes?name=$VOLUME_NAME®ion=nyc1`. No - response body will be sent back, but the response code will indicate success. - Specifically, the response code will be a 204, which means that the action was - successful with no returned body data. - - Args: - name: The block storage volume's name. - - region: The slug identifier for the region where the resource is available. - - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - extra_headers = {"Accept": "*/*", **(extra_headers or {})} - return await self._delete( - "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes", - options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "name": name, - "region": region, - }, - volume_delete_by_name_params.VolumeDeleteByNameParams, - ), - ), - cast_to=NoneType, - ) - - -class VolumesResourceWithRawResponse: - def __init__(self, volumes: VolumesResource) -> None: - self._volumes = volumes - - self.create = to_raw_response_wrapper( - volumes.create, - ) - self.retrieve = to_raw_response_wrapper( - volumes.retrieve, - ) - self.list = to_raw_response_wrapper( - volumes.list, - ) - self.delete = to_raw_response_wrapper( - volumes.delete, - ) - self.delete_by_name = to_raw_response_wrapper( - volumes.delete_by_name, - ) - - @cached_property - def 
actions(self) -> ActionsResourceWithRawResponse: - return ActionsResourceWithRawResponse(self._volumes.actions) - - @cached_property - def snapshots(self) -> SnapshotsResourceWithRawResponse: - return SnapshotsResourceWithRawResponse(self._volumes.snapshots) - - -class AsyncVolumesResourceWithRawResponse: - def __init__(self, volumes: AsyncVolumesResource) -> None: - self._volumes = volumes - - self.create = async_to_raw_response_wrapper( - volumes.create, - ) - self.retrieve = async_to_raw_response_wrapper( - volumes.retrieve, - ) - self.list = async_to_raw_response_wrapper( - volumes.list, - ) - self.delete = async_to_raw_response_wrapper( - volumes.delete, - ) - self.delete_by_name = async_to_raw_response_wrapper( - volumes.delete_by_name, - ) - - @cached_property - def actions(self) -> AsyncActionsResourceWithRawResponse: - return AsyncActionsResourceWithRawResponse(self._volumes.actions) - - @cached_property - def snapshots(self) -> AsyncSnapshotsResourceWithRawResponse: - return AsyncSnapshotsResourceWithRawResponse(self._volumes.snapshots) - - -class VolumesResourceWithStreamingResponse: - def __init__(self, volumes: VolumesResource) -> None: - self._volumes = volumes - - self.create = to_streamed_response_wrapper( - volumes.create, - ) - self.retrieve = to_streamed_response_wrapper( - volumes.retrieve, - ) - self.list = to_streamed_response_wrapper( - volumes.list, - ) - self.delete = to_streamed_response_wrapper( - volumes.delete, - ) - self.delete_by_name = to_streamed_response_wrapper( - volumes.delete_by_name, - ) - - @cached_property - def actions(self) -> ActionsResourceWithStreamingResponse: - return ActionsResourceWithStreamingResponse(self._volumes.actions) - - @cached_property - def snapshots(self) -> SnapshotsResourceWithStreamingResponse: - return SnapshotsResourceWithStreamingResponse(self._volumes.snapshots) - - -class AsyncVolumesResourceWithStreamingResponse: - def __init__(self, volumes: AsyncVolumesResource) -> None: - self._volumes = 
volumes - - self.create = async_to_streamed_response_wrapper( - volumes.create, - ) - self.retrieve = async_to_streamed_response_wrapper( - volumes.retrieve, - ) - self.list = async_to_streamed_response_wrapper( - volumes.list, - ) - self.delete = async_to_streamed_response_wrapper( - volumes.delete, - ) - self.delete_by_name = async_to_streamed_response_wrapper( - volumes.delete_by_name, - ) - - @cached_property - def actions(self) -> AsyncActionsResourceWithStreamingResponse: - return AsyncActionsResourceWithStreamingResponse(self._volumes.actions) - - @cached_property - def snapshots(self) -> AsyncSnapshotsResourceWithStreamingResponse: - return AsyncSnapshotsResourceWithStreamingResponse(self._volumes.snapshots) diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index e0c8310d..c8144381 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -3,137 +3,42 @@ from __future__ import annotations from .shared import ( - Size as Size, - Image as Image, - Action as Action, - Kernel as Kernel, - Region as Region, APIMeta as APIMeta, - Droplet as Droplet, - GPUInfo as GPUInfo, APILinks as APILinks, - DiskInfo as DiskInfo, - NetworkV4 as NetworkV4, - NetworkV6 as NetworkV6, - PageLinks as PageLinks, - Snapshots as Snapshots, - ActionLink as ActionLink, - VpcPeering as VpcPeering, - ForwardLinks as ForwardLinks, - Subscription as Subscription, - BackwardLinks as BackwardLinks, - RepositoryTag as RepositoryTag, - MetaProperties as MetaProperties, - RepositoryBlob as RepositoryBlob, - CompletionUsage as CompletionUsage, - GarbageCollection as GarbageCollection, - FirewallRuleTarget as FirewallRuleTarget, - RepositoryManifest as RepositoryManifest, ChatCompletionChunk as ChatCompletionChunk, - SubscriptionTierBase as SubscriptionTierBase, - DropletNextBackupWindow as DropletNextBackupWindow, ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, ) -from .domains import Domains as Domains -from .firewall import 
Firewall as Firewall from .api_agent import APIAgent as APIAgent from .api_model import APIModel as APIModel -from .floating_ip import FloatingIP as FloatingIP -from .lb_firewall import LbFirewall as LbFirewall -from .glb_settings import GlbSettings as GlbSettings -from .health_check import HealthCheck as HealthCheck from .api_agreement import APIAgreement as APIAgreement from .api_workspace import APIWorkspace as APIWorkspace -from .domains_param import DomainsParam as DomainsParam -from .load_balancer import LoadBalancer as LoadBalancer -from .firewall_param import FirewallParam as FirewallParam from .api_agent_model import APIAgentModel as APIAgentModel -from .forwarding_rule import ForwardingRule as ForwardingRule -from .sticky_sessions import StickySessions as StickySessions -from .size_list_params import SizeListParams as SizeListParams from .agent_list_params import AgentListParams as AgentListParams from .api_model_version import APIModelVersion as APIModelVersion -from .image_list_params import ImageListParams as ImageListParams -from .lb_firewall_param import LbFirewallParam as LbFirewallParam +from .model_list_params import ModelListParams as ModelListParams from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase -from .glb_settings_param import GlbSettingsParam as GlbSettingsParam -from .health_check_param import HealthCheckParam as HealthCheckParam from .region_list_params import RegionListParams as RegionListParams -from .size_list_response import SizeListResponse as SizeListResponse -from .volume_list_params import VolumeListParams as VolumeListParams from .agent_create_params import AgentCreateParams as AgentCreateParams from .agent_list_response import AgentListResponse as AgentListResponse from .agent_update_params import AgentUpdateParams as AgentUpdateParams -from .droplet_list_params import DropletListParams as DropletListParams -from .image_create_params import ImageCreateParams as ImageCreateParams -from .image_list_response 
import ImageListResponse as ImageListResponse -from .image_update_params import ImageUpdateParams as ImageUpdateParams from .model_list_response import ModelListResponse as ModelListResponse from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod -from .firewall_list_params import FirewallListParams as FirewallListParams from .region_list_response import RegionListResponse as RegionListResponse -from .snapshot_list_params import SnapshotListParams as SnapshotListParams -from .volume_create_params import VolumeCreateParams as VolumeCreateParams -from .volume_list_response import VolumeListResponse as VolumeListResponse from .agent_create_response import AgentCreateResponse as AgentCreateResponse from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse -from .droplet_backup_policy import DropletBackupPolicy as DropletBackupPolicy -from .droplet_create_params import DropletCreateParams as DropletCreateParams -from .droplet_list_response import DropletListResponse as DropletListResponse -from .forwarding_rule_param import ForwardingRuleParam as ForwardingRuleParam -from .image_create_response import ImageCreateResponse as ImageCreateResponse -from .image_update_response import ImageUpdateResponse as ImageUpdateResponse -from .sticky_sessions_param import StickySessionsParam as StickySessionsParam from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo -from .firewall_create_params import FirewallCreateParams as FirewallCreateParams -from .firewall_list_response import FirewallListResponse as FirewallListResponse -from .firewall_update_params import FirewallUpdateParams as FirewallUpdateParams -from .snapshot_list_response import SnapshotListResponse as SnapshotListResponse -from .volume_create_response import VolumeCreateResponse as VolumeCreateResponse from .agent_retrieve_response import AgentRetrieveResponse as 
AgentRetrieveResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo -from .droplet_create_response import DropletCreateResponse as DropletCreateResponse -from .floating_ip_list_params import FloatingIPListParams as FloatingIPListParams -from .image_retrieve_response import ImageRetrieveResponse as ImageRetrieveResponse -from .model_retrieve_response import ModelRetrieveResponse as ModelRetrieveResponse -from .firewall_create_response import FirewallCreateResponse as FirewallCreateResponse -from .firewall_update_response import FirewallUpdateResponse as FirewallUpdateResponse -from .volume_retrieve_response import VolumeRetrieveResponse as VolumeRetrieveResponse -from .account_retrieve_response import AccountRetrieveResponse as AccountRetrieveResponse from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility -from .droplet_retrieve_response import DropletRetrieveResponse as DropletRetrieveResponse -from .floating_ip_create_params import FloatingIPCreateParams as FloatingIPCreateParams -from .floating_ip_list_response import FloatingIPListResponse as FloatingIPListResponse -from .load_balancer_list_params import LoadBalancerListParams as LoadBalancerListParams from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo -from .firewall_retrieve_response import FirewallRetrieveResponse as FirewallRetrieveResponse from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams -from .snapshot_retrieve_response import SnapshotRetrieveResponse as SnapshotRetrieveResponse -from .droplet_backup_policy_param import DropletBackupPolicyParam as DropletBackupPolicyParam -from .droplet_list_kernels_params import DropletListKernelsParams as DropletListKernelsParams -from .floating_ip_create_response import FloatingIPCreateResponse as FloatingIPCreateResponse -from 
.load_balancer_create_params import LoadBalancerCreateParams as LoadBalancerCreateParams -from .load_balancer_list_response import LoadBalancerListResponse as LoadBalancerListResponse -from .load_balancer_update_params import LoadBalancerUpdateParams as LoadBalancerUpdateParams from .agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse -from .droplet_delete_by_tag_params import DropletDeleteByTagParams as DropletDeleteByTagParams from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams -from .volume_delete_by_name_params import VolumeDeleteByNameParams as VolumeDeleteByNameParams -from .droplet_list_firewalls_params import DropletListFirewallsParams as DropletListFirewallsParams -from .droplet_list_kernels_response import DropletListKernelsResponse as DropletListKernelsResponse -from .droplet_list_snapshots_params import DropletListSnapshotsParams as DropletListSnapshotsParams -from .floating_ip_retrieve_response import FloatingIPRetrieveResponse as FloatingIPRetrieveResponse -from .load_balancer_create_response import LoadBalancerCreateResponse as LoadBalancerCreateResponse -from .load_balancer_update_response import LoadBalancerUpdateResponse as LoadBalancerUpdateResponse from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse -from .droplet_list_firewalls_response import DropletListFirewallsResponse as DropletListFirewallsResponse -from .droplet_list_neighbors_response import DropletListNeighborsResponse as DropletListNeighborsResponse -from 
.droplet_list_snapshots_response import DropletListSnapshotsResponse as DropletListSnapshotsResponse -from .load_balancer_retrieve_response import LoadBalancerRetrieveResponse as LoadBalancerRetrieveResponse from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse diff --git a/src/gradientai/types/account/__init__.py b/src/gradientai/types/account/__init__.py deleted file mode 100644 index 4cd64974..00000000 --- a/src/gradientai/types/account/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .key_list_params import KeyListParams as KeyListParams -from .key_create_params import KeyCreateParams as KeyCreateParams -from .key_list_response import KeyListResponse as KeyListResponse -from .key_update_params import KeyUpdateParams as KeyUpdateParams -from .key_create_response import KeyCreateResponse as KeyCreateResponse -from .key_update_response import KeyUpdateResponse as KeyUpdateResponse -from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse diff --git a/src/gradientai/types/account/key_create_params.py b/src/gradientai/types/account/key_create_params.py deleted file mode 100644 index 4e7c1cef..00000000 --- a/src/gradientai/types/account/key_create_params.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -__all__ = ["KeyCreateParams"] - - -class KeyCreateParams(TypedDict, total=False): - name: Required[str] - """ - A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. - """ - - public_key: Required[str] - """The entire public key string that was uploaded. 
- - Embedded into the root user's `authorized_keys` file if you include this key - during Droplet creation. - """ diff --git a/src/gradientai/types/account/key_create_response.py b/src/gradientai/types/account/key_create_response.py deleted file mode 100644 index 883be88a..00000000 --- a/src/gradientai/types/account/key_create_response.py +++ /dev/null @@ -1,39 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["KeyCreateResponse", "SSHKey"] - - -class SSHKey(BaseModel): - name: str - """ - A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. - """ - - public_key: str - """The entire public key string that was uploaded. - - Embedded into the root user's `authorized_keys` file if you include this key - during Droplet creation. - """ - - id: Optional[int] = None - """A unique identification number for this key. - - Can be used to embed a specific SSH key into a Droplet. - """ - - fingerprint: Optional[str] = None - """ - A unique identifier that differentiates this key from other keys using a format - that SSH recognizes. The fingerprint is created when the key is added to your - account. - """ - - -class KeyCreateResponse(BaseModel): - ssh_key: Optional[SSHKey] = None diff --git a/src/gradientai/types/account/key_list_params.py b/src/gradientai/types/account/key_list_params.py deleted file mode 100644 index 44a455f3..00000000 --- a/src/gradientai/types/account/key_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyListParams"] - - -class KeyListParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" diff --git a/src/gradientai/types/account/key_list_response.py b/src/gradientai/types/account/key_list_response.py deleted file mode 100644 index 64dc6de8..00000000 --- a/src/gradientai/types/account/key_list_response.py +++ /dev/null @@ -1,46 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from ..._models import BaseModel -from ..shared.page_links import PageLinks -from ..shared.meta_properties import MetaProperties - -__all__ = ["KeyListResponse", "SSHKey"] - - -class SSHKey(BaseModel): - name: str - """ - A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. - """ - - public_key: str - """The entire public key string that was uploaded. - - Embedded into the root user's `authorized_keys` file if you include this key - during Droplet creation. - """ - - id: Optional[int] = None - """A unique identification number for this key. - - Can be used to embed a specific SSH key into a Droplet. - """ - - fingerprint: Optional[str] = None - """ - A unique identifier that differentiates this key from other keys using a format - that SSH recognizes. The fingerprint is created when the key is added to your - account. 
- """ - - -class KeyListResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - links: Optional[PageLinks] = None - - ssh_keys: Optional[List[SSHKey]] = None diff --git a/src/gradientai/types/account/key_retrieve_response.py b/src/gradientai/types/account/key_retrieve_response.py deleted file mode 100644 index 377f57e1..00000000 --- a/src/gradientai/types/account/key_retrieve_response.py +++ /dev/null @@ -1,39 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["KeyRetrieveResponse", "SSHKey"] - - -class SSHKey(BaseModel): - name: str - """ - A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. - """ - - public_key: str - """The entire public key string that was uploaded. - - Embedded into the root user's `authorized_keys` file if you include this key - during Droplet creation. - """ - - id: Optional[int] = None - """A unique identification number for this key. - - Can be used to embed a specific SSH key into a Droplet. - """ - - fingerprint: Optional[str] = None - """ - A unique identifier that differentiates this key from other keys using a format - that SSH recognizes. The fingerprint is created when the key is added to your - account. - """ - - -class KeyRetrieveResponse(BaseModel): - ssh_key: Optional[SSHKey] = None diff --git a/src/gradientai/types/account/key_update_params.py b/src/gradientai/types/account/key_update_params.py deleted file mode 100644 index e73d8b7b..00000000 --- a/src/gradientai/types/account/key_update_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["KeyUpdateParams"] - - -class KeyUpdateParams(TypedDict, total=False): - name: str - """ - A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. - """ diff --git a/src/gradientai/types/account/key_update_response.py b/src/gradientai/types/account/key_update_response.py deleted file mode 100644 index eee61419..00000000 --- a/src/gradientai/types/account/key_update_response.py +++ /dev/null @@ -1,39 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["KeyUpdateResponse", "SSHKey"] - - -class SSHKey(BaseModel): - name: str - """ - A human-readable display name for this key, used to easily identify the SSH keys - when they are displayed. - """ - - public_key: str - """The entire public key string that was uploaded. - - Embedded into the root user's `authorized_keys` file if you include this key - during Droplet creation. - """ - - id: Optional[int] = None - """A unique identification number for this key. - - Can be used to embed a specific SSH key into a Droplet. - """ - - fingerprint: Optional[str] = None - """ - A unique identifier that differentiates this key from other keys using a format - that SSH recognizes. The fingerprint is created when the key is added to your - account. - """ - - -class KeyUpdateResponse(BaseModel): - ssh_key: Optional[SSHKey] = None diff --git a/src/gradientai/types/account_retrieve_response.py b/src/gradientai/types/account_retrieve_response.py deleted file mode 100644 index 630f33e6..00000000 --- a/src/gradientai/types/account_retrieve_response.py +++ /dev/null @@ -1,55 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["AccountRetrieveResponse", "Account", "AccountTeam"] - - -class AccountTeam(BaseModel): - name: Optional[str] = None - """The name for the current team.""" - - uuid: Optional[str] = None - """The unique universal identifier for the current team.""" - - -class Account(BaseModel): - droplet_limit: int - """The total number of Droplets current user or team may have active at one time. - - Requires `droplet:read` scope. - """ - - email: str - """The email address used by the current user to register for DigitalOcean.""" - - email_verified: bool - """If true, the user has verified their account via email. False otherwise.""" - - floating_ip_limit: int - """The total number of Floating IPs the current user or team may have. - - Requires `reserved_ip:read` scope. - """ - - status: Literal["active", "warning", "locked"] - """This value is one of "active", "warning" or "locked".""" - - status_message: str - """A human-readable message giving more details about the status of the account.""" - - uuid: str - """The unique universal identifier for the current user.""" - - name: Optional[str] = None - """The display name for the current user.""" - - team: Optional[AccountTeam] = None - """When authorized in a team context, includes information about the current team.""" - - -class AccountRetrieveResponse(BaseModel): - account: Optional[Account] = None diff --git a/src/gradientai/types/agent_create_params.py b/src/gradientai/types/agent_create_params.py index 68ebd227..58b99df7 100644 --- a/src/gradientai/types/agent_create_params.py +++ b/src/gradientai/types/agent_create_params.py @@ -12,10 +12,8 @@ class AgentCreateParams(TypedDict, total=False): anthropic_key_uuid: str - """Optional Anthropic API key ID to use with Anthropic models""" description: str - """A text description of the agent, not used in inference""" instruction: str """Agent instruction. 
@@ -26,22 +24,16 @@ class AgentCreateParams(TypedDict, total=False): """ knowledge_base_uuid: List[str] - """Ids of the knowledge base(s) to attach to the agent""" model_uuid: str """Identifier for the foundation model.""" name: str - """Agent name""" openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")] - """Optional OpenAI API key ID to use with OpenAI models""" project_id: str - """The id of the DigitalOcean project this agent will belong to""" region: str - """The DigitalOcean region to deploy your agent in""" tags: List[str] - """Agent tag to organize related resources""" diff --git a/src/gradientai/types/agent_create_response.py b/src/gradientai/types/agent_create_response.py index edd48b7d..48545fe9 100644 --- a/src/gradientai/types/agent_create_response.py +++ b/src/gradientai/types/agent_create_response.py @@ -11,7 +11,6 @@ class AgentCreateResponse(BaseModel): agent: Optional["APIAgent"] = None - """An Agent""" from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_delete_response.py b/src/gradientai/types/agent_delete_response.py index 8c2b2e14..eb1d440d 100644 --- a/src/gradientai/types/agent_delete_response.py +++ b/src/gradientai/types/agent_delete_response.py @@ -11,7 +11,6 @@ class AgentDeleteResponse(BaseModel): agent: Optional["APIAgent"] = None - """An Agent""" from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_list_params.py b/src/gradientai/types/agent_list_params.py index b56d0395..e13a10c9 100644 --- a/src/gradientai/types/agent_list_params.py +++ b/src/gradientai/types/agent_list_params.py @@ -9,10 +9,10 @@ class AgentListParams(TypedDict, total=False): only_deployed: bool - """Only list agents that are deployed.""" + """only list agents that are deployed.""" page: int - """Page number.""" + """page number.""" per_page: int - """Items per page.""" + """items per page.""" diff --git a/src/gradientai/types/agent_list_response.py b/src/gradientai/types/agent_list_response.py index 
7a64c66e..397d9fd2 100644 --- a/src/gradientai/types/agent_list_response.py +++ b/src/gradientai/types/agent_list_response.py @@ -29,7 +29,6 @@ class AgentChatbot(BaseModel): logo: Optional[str] = None name: Optional[str] = None - """Name of chatbot""" primary_color: Optional[str] = None @@ -40,15 +39,12 @@ class AgentChatbot(BaseModel): class AgentChatbotIdentifier(BaseModel): agent_chatbot_identifier: Optional[str] = None - """Agent chatbot identifier""" class AgentDeployment(BaseModel): created_at: Optional[datetime] = None - """Creation date / time""" name: Optional[str] = None - """Name""" status: Optional[ Literal[ @@ -65,112 +61,70 @@ class AgentDeployment(BaseModel): ] = None updated_at: Optional[datetime] = None - """Last modified""" url: Optional[str] = None - """Access your deployed agent here""" uuid: Optional[str] = None - """Unique id""" visibility: Optional[APIDeploymentVisibility] = None - """ - - VISIBILITY_UNKNOWN: The status of the deployment is unknown - - VISIBILITY_DISABLED: The deployment is disabled and will no longer service - requests - - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state - - VISIBILITY_PUBLIC: The deployment is public and will service requests from the - public internet - - VISIBILITY_PRIVATE: The deployment is private and will only service requests - from other agents, or through API keys - """ class AgentTemplateGuardrail(BaseModel): priority: Optional[int] = None - """Priority of the guardrail""" uuid: Optional[str] = None - """Uuid of the guardrail""" class AgentTemplate(BaseModel): created_at: Optional[datetime] = None - """The agent template's creation date""" description: Optional[str] = None - """Deprecated - Use summary instead""" guardrails: Optional[List[AgentTemplateGuardrail]] = None - """List of guardrails associated with the agent template""" instruction: Optional[str] = None - """Instructions for the agent template""" k: Optional[int] = None - """The 'k' value for the agent template""" 
knowledge_bases: Optional[List[APIKnowledgeBase]] = None - """List of knowledge bases associated with the agent template""" long_description: Optional[str] = None - """The long description of the agent template""" max_tokens: Optional[int] = None - """The max_tokens setting for the agent template""" model: Optional[APIAgentModel] = None - """Description of a Model""" name: Optional[str] = None - """Name of the agent template""" short_description: Optional[str] = None - """The short description of the agent template""" summary: Optional[str] = None - """The summary of the agent template""" tags: Optional[List[str]] = None - """List of tags associated with the agent template""" temperature: Optional[float] = None - """The temperature setting for the agent template""" template_type: Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None - """ - - AGENT_TEMPLATE_TYPE_STANDARD: The standard agent template - - AGENT_TEMPLATE_TYPE_ONE_CLICK: The one click agent template - """ top_p: Optional[float] = None - """The top_p setting for the agent template""" updated_at: Optional[datetime] = None - """The agent template's last updated date""" uuid: Optional[str] = None - """Unique id""" class Agent(BaseModel): chatbot: Optional[AgentChatbot] = None - """A Chatbot""" chatbot_identifiers: Optional[List[AgentChatbotIdentifier]] = None - """Chatbot identifiers""" created_at: Optional[datetime] = None - """Creation date / time""" deployment: Optional[AgentDeployment] = None - """Description of deployment""" description: Optional[str] = None - """Description of agent""" if_case: Optional[str] = None - """Instructions to the agent on how to use the route""" instruction: Optional[str] = None """Agent instruction. 
@@ -181,7 +135,6 @@ class Agent(BaseModel): """ k: Optional[int] = None - """How many results should be considered from an attached knowledge base""" max_tokens: Optional[int] = None """ @@ -191,43 +144,26 @@ class Agent(BaseModel): """ model: Optional[APIAgentModel] = None - """Description of a Model""" name: Optional[str] = None - """Agent name""" project_id: Optional[str] = None - """The DigitalOcean project ID associated with the agent""" provide_citations: Optional[bool] = None - """Whether the agent should provide in-response citations""" region: Optional[str] = None - """Region code""" retrieval_method: Optional[APIRetrievalMethod] = None - """ - - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown - - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite - - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back - - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries - - RETRIEVAL_METHOD_NONE: The retrieval method is none - """ route_created_at: Optional[datetime] = None - """Creation of route date / time""" route_created_by: Optional[str] = None - """Id of user that created the route""" route_name: Optional[str] = None - """Route name""" route_uuid: Optional[str] = None - """Route uuid""" tags: Optional[List[str]] = None - """A set of abitrary tags to organize your agent""" temperature: Optional[float] = None """Controls the model’s creativity, specified as a number between 0 and 1. 
@@ -237,7 +173,6 @@ class Agent(BaseModel): """ template: Optional[AgentTemplate] = None - """Represents an AgentTemplate entity""" top_p: Optional[float] = None """ @@ -247,27 +182,17 @@ class Agent(BaseModel): """ updated_at: Optional[datetime] = None - """Last modified""" url: Optional[str] = None - """Access your agent under this url""" user_id: Optional[str] = None - """Id of user that created the agent""" uuid: Optional[str] = None - """Unique agent id""" - - version_hash: Optional[str] = None - """The latest version of the agent""" class AgentListResponse(BaseModel): agents: Optional[List[Agent]] = None - """Agents""" links: Optional[APILinks] = None - """Links to other pages""" meta: Optional[APIMeta] = None - """Meta information about the data set""" diff --git a/src/gradientai/types/agent_retrieve_response.py b/src/gradientai/types/agent_retrieve_response.py index 2836558b..2eed88af 100644 --- a/src/gradientai/types/agent_retrieve_response.py +++ b/src/gradientai/types/agent_retrieve_response.py @@ -11,7 +11,6 @@ class AgentRetrieveResponse(BaseModel): agent: Optional["APIAgent"] = None - """An Agent""" from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_update_params.py b/src/gradientai/types/agent_update_params.py index 5d2b5597..85f9a9c2 100644 --- a/src/gradientai/types/agent_update_params.py +++ b/src/gradientai/types/agent_update_params.py @@ -13,13 +13,8 @@ class AgentUpdateParams(TypedDict, total=False): anthropic_key_uuid: str - """Optional anthropic key uuid for use with anthropic models""" - - conversation_logs_enabled: bool - """Optional update of conversation logs enabled""" description: str - """Agent description""" instruction: str """Agent instruction. 
@@ -30,7 +25,6 @@ class AgentUpdateParams(TypedDict, total=False): """ k: int - """How many results should be considered from an attached knowledge base""" max_tokens: int """ @@ -43,27 +37,16 @@ class AgentUpdateParams(TypedDict, total=False): """Identifier for the foundation model.""" name: str - """Agent name""" openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")] - """Optional OpenAI key uuid for use with OpenAI models""" project_id: str - """The id of the DigitalOcean project this agent will belong to""" provide_citations: bool retrieval_method: APIRetrievalMethod - """ - - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown - - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite - - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back - - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries - - RETRIEVAL_METHOD_NONE: The retrieval method is none - """ tags: List[str] - """A set of abitrary tags to organize your agent""" temperature: float """Controls the model’s creativity, specified as a number between 0 and 1. 
@@ -80,4 +63,3 @@ class AgentUpdateParams(TypedDict, total=False): """ body_uuid: Annotated[str, PropertyInfo(alias="uuid")] - """Unique agent id""" diff --git a/src/gradientai/types/agent_update_response.py b/src/gradientai/types/agent_update_response.py index 1976089b..2948aa1c 100644 --- a/src/gradientai/types/agent_update_response.py +++ b/src/gradientai/types/agent_update_response.py @@ -11,7 +11,6 @@ class AgentUpdateResponse(BaseModel): agent: Optional["APIAgent"] = None - """An Agent""" from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_update_status_params.py b/src/gradientai/types/agent_update_status_params.py index 3f16fdc2..a0cdc0b9 100644 --- a/src/gradientai/types/agent_update_status_params.py +++ b/src/gradientai/types/agent_update_status_params.py @@ -12,16 +12,5 @@ class AgentUpdateStatusParams(TypedDict, total=False): body_uuid: Annotated[str, PropertyInfo(alias="uuid")] - """Unique id""" visibility: APIDeploymentVisibility - """ - - VISIBILITY_UNKNOWN: The status of the deployment is unknown - - VISIBILITY_DISABLED: The deployment is disabled and will no longer service - requests - - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state - - VISIBILITY_PUBLIC: The deployment is public and will service requests from the - public internet - - VISIBILITY_PRIVATE: The deployment is private and will only service requests - from other agents, or through API keys - """ diff --git a/src/gradientai/types/agent_update_status_response.py b/src/gradientai/types/agent_update_status_response.py index 84457d85..b200f99d 100644 --- a/src/gradientai/types/agent_update_status_response.py +++ b/src/gradientai/types/agent_update_status_response.py @@ -11,7 +11,6 @@ class AgentUpdateStatusResponse(BaseModel): agent: Optional["APIAgent"] = None - """An Agent""" from .api_agent import APIAgent diff --git a/src/gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py index 39b82ebc..9c6508f6 100644 --- 
a/src/gradientai/types/agents/__init__.py +++ b/src/gradientai/types/agents/__init__.py @@ -40,7 +40,6 @@ from .evaluation_dataset_create_params import EvaluationDatasetCreateParams as EvaluationDatasetCreateParams from .evaluation_run_retrieve_response import EvaluationRunRetrieveResponse as EvaluationRunRetrieveResponse from .evaluation_dataset_create_response import EvaluationDatasetCreateResponse as EvaluationDatasetCreateResponse -from .evaluation_run_list_results_params import EvaluationRunListResultsParams as EvaluationRunListResultsParams from .evaluation_test_case_create_params import EvaluationTestCaseCreateParams as EvaluationTestCaseCreateParams from .evaluation_test_case_list_response import EvaluationTestCaseListResponse as EvaluationTestCaseListResponse from .evaluation_test_case_update_params import EvaluationTestCaseUpdateParams as EvaluationTestCaseUpdateParams @@ -48,15 +47,9 @@ from .evaluation_test_case_create_response import EvaluationTestCaseCreateResponse as EvaluationTestCaseCreateResponse from .evaluation_test_case_retrieve_params import EvaluationTestCaseRetrieveParams as EvaluationTestCaseRetrieveParams from .evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse as EvaluationTestCaseUpdateResponse -from .evaluation_metric_list_regions_params import ( - EvaluationMetricListRegionsParams as EvaluationMetricListRegionsParams, -) from .evaluation_test_case_retrieve_response import ( EvaluationTestCaseRetrieveResponse as EvaluationTestCaseRetrieveResponse, ) -from .evaluation_metric_list_regions_response import ( - EvaluationMetricListRegionsResponse as EvaluationMetricListRegionsResponse, -) from .evaluation_run_retrieve_results_response import ( EvaluationRunRetrieveResultsResponse as EvaluationRunRetrieveResultsResponse, ) diff --git a/src/gradientai/types/agents/api_evaluation_metric.py b/src/gradientai/types/agents/api_evaluation_metric.py index 2d3b4194..1aa85306 100644 --- 
a/src/gradientai/types/agents/api_evaluation_metric.py +++ b/src/gradientai/types/agents/api_evaluation_metric.py @@ -11,9 +11,6 @@ class APIEvaluationMetric(BaseModel): description: Optional[str] = None - inverted: Optional[bool] = None - """If true, the metric is inverted, meaning that a lower value is better.""" - metric_name: Optional[str] = None metric_type: Optional[ @@ -23,16 +20,5 @@ class APIEvaluationMetric(BaseModel): metric_uuid: Optional[str] = None metric_value_type: Optional[ - Literal[ - "METRIC_VALUE_TYPE_UNSPECIFIED", - "METRIC_VALUE_TYPE_NUMBER", - "METRIC_VALUE_TYPE_STRING", - "METRIC_VALUE_TYPE_PERCENTAGE", - ] + Literal["METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", "METRIC_VALUE_TYPE_STRING"] ] = None - - range_max: Optional[float] = None - """The maximum value for the metric.""" - - range_min: Optional[float] = None - """The minimum value for the metric.""" diff --git a/src/gradientai/types/agents/api_evaluation_metric_result.py b/src/gradientai/types/agents/api_evaluation_metric_result.py index 3d6ea84f..35146c00 100644 --- a/src/gradientai/types/agents/api_evaluation_metric_result.py +++ b/src/gradientai/types/agents/api_evaluation_metric_result.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from typing import Optional -from typing_extensions import Literal from ..._models import BaseModel @@ -9,26 +8,10 @@ class APIEvaluationMetricResult(BaseModel): - error_description: Optional[str] = None - """Error description if the metric could not be calculated.""" - metric_name: Optional[str] = None - """Metric name""" - - metric_value_type: Optional[ - Literal[ - "METRIC_VALUE_TYPE_UNSPECIFIED", - "METRIC_VALUE_TYPE_NUMBER", - "METRIC_VALUE_TYPE_STRING", - "METRIC_VALUE_TYPE_PERCENTAGE", - ] - ] = None number_value: Optional[float] = None """The value of the metric as a number.""" - reasoning: Optional[str] = None - """Reasoning of the metric result.""" - string_value: Optional[str] = None """The value of the metric as a string.""" diff --git a/src/gradientai/types/agents/api_evaluation_prompt.py b/src/gradientai/types/agents/api_evaluation_prompt.py index 7471e9ae..750e62fb 100644 --- a/src/gradientai/types/agents/api_evaluation_prompt.py +++ b/src/gradientai/types/agents/api_evaluation_prompt.py @@ -31,19 +31,12 @@ class APIEvaluationPrompt(BaseModel): input: Optional[str] = None - input_tokens: Optional[str] = None - """The number of input tokens used in the prompt.""" - output: Optional[str] = None - output_tokens: Optional[str] = None - """The number of output tokens used in the prompt.""" - prompt_chunks: Optional[List[PromptChunk]] = None """The list of prompt chunks.""" prompt_id: Optional[int] = None - """Prompt ID""" prompt_level_metric_results: Optional[List[APIEvaluationMetricResult]] = None """The metric results for the prompt.""" diff --git a/src/gradientai/types/agents/api_evaluation_run.py b/src/gradientai/types/agents/api_evaluation_run.py index 5a758898..b879f756 100644 --- a/src/gradientai/types/agents/api_evaluation_run.py +++ b/src/gradientai/types/agents/api_evaluation_run.py @@ -12,42 +12,31 @@ class APIEvaluationRun(BaseModel): agent_deleted: Optional[bool] = None - """Whether agent is deleted""" agent_name: Optional[str] = None - 
"""Agent name""" agent_uuid: Optional[str] = None """Agent UUID.""" agent_version_hash: Optional[str] = None - """Version hash""" agent_workspace_uuid: Optional[str] = None - """Agent workspace uuid""" created_by_user_email: Optional[str] = None created_by_user_id: Optional[str] = None error_description: Optional[str] = None - """The error description""" evaluation_run_uuid: Optional[str] = None """Evaluation run UUID.""" - evaluation_test_case_workspace_uuid: Optional[str] = None - """Evaluation test case workspace uuid""" - finished_at: Optional[datetime] = None """Run end time.""" pass_status: Optional[bool] = None """The pass status of the evaluation run based on the star metric.""" - queued_at: Optional[datetime] = None - """Run queued time.""" - run_level_metric_results: Optional[List[APIEvaluationMetricResult]] = None run_name: Optional[str] = None @@ -71,13 +60,6 @@ class APIEvaluationRun(BaseModel): "EVALUATION_RUN_FAILED", ] ] = None - """Evaluation Run Statuses""" - - test_case_description: Optional[str] = None - """Test case description.""" - - test_case_name: Optional[str] = None - """Test case name.""" test_case_uuid: Optional[str] = None """Test-case UUID.""" diff --git a/src/gradientai/types/agents/api_evaluation_test_case.py b/src/gradientai/types/agents/api_evaluation_test_case.py index dc4c55f0..09ce5e48 100644 --- a/src/gradientai/types/agents/api_evaluation_test_case.py +++ b/src/gradientai/types/agents/api_evaluation_test_case.py @@ -7,27 +7,7 @@ from .api_star_metric import APIStarMetric from .api_evaluation_metric import APIEvaluationMetric -__all__ = ["APIEvaluationTestCase", "Dataset"] - - -class Dataset(BaseModel): - created_at: Optional[datetime] = None - """Time created at.""" - - dataset_name: Optional[str] = None - """Name of the dataset.""" - - dataset_uuid: Optional[str] = None - """UUID of the dataset.""" - - file_size: Optional[str] = None - """The size of the dataset uploaded file in bytes.""" - - has_ground_truth: Optional[bool] 
= None - """Does the dataset have a ground truth column?""" - - row_count: Optional[int] = None - """Number of rows in the dataset.""" +__all__ = ["APIEvaluationTestCase"] class APIEvaluationTestCase(BaseModel): @@ -39,8 +19,6 @@ class APIEvaluationTestCase(BaseModel): created_by_user_id: Optional[str] = None - dataset: Optional[Dataset] = None - dataset_name: Optional[str] = None dataset_uuid: Optional[str] = None diff --git a/src/gradientai/types/agents/api_key_create_params.py b/src/gradientai/types/agents/api_key_create_params.py index 184c330c..c3fc44cd 100644 --- a/src/gradientai/types/agents/api_key_create_params.py +++ b/src/gradientai/types/agents/api_key_create_params.py @@ -11,7 +11,5 @@ class APIKeyCreateParams(TypedDict, total=False): body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] - """Agent id""" name: str - """A human friendly name to identify the key""" diff --git a/src/gradientai/types/agents/api_key_create_response.py b/src/gradientai/types/agents/api_key_create_response.py index ed8906c8..09689fe7 100644 --- a/src/gradientai/types/agents/api_key_create_response.py +++ b/src/gradientai/types/agents/api_key_create_response.py @@ -10,4 +10,3 @@ class APIKeyCreateResponse(BaseModel): api_key_info: Optional[APIAgentAPIKeyInfo] = None - """Agent API Key Info""" diff --git a/src/gradientai/types/agents/api_key_delete_response.py b/src/gradientai/types/agents/api_key_delete_response.py index 1f38c52e..02b03f61 100644 --- a/src/gradientai/types/agents/api_key_delete_response.py +++ b/src/gradientai/types/agents/api_key_delete_response.py @@ -10,4 +10,3 @@ class APIKeyDeleteResponse(BaseModel): api_key_info: Optional[APIAgentAPIKeyInfo] = None - """Agent API Key Info""" diff --git a/src/gradientai/types/agents/api_key_list_params.py b/src/gradientai/types/agents/api_key_list_params.py index 1f8f96b7..11da9398 100644 --- a/src/gradientai/types/agents/api_key_list_params.py +++ b/src/gradientai/types/agents/api_key_list_params.py @@ -9,7 
+9,7 @@ class APIKeyListParams(TypedDict, total=False): page: int - """Page number.""" + """page number.""" per_page: int - """Items per page.""" + """items per page.""" diff --git a/src/gradientai/types/agents/api_key_list_response.py b/src/gradientai/types/agents/api_key_list_response.py index 0040e91c..aedb88ca 100644 --- a/src/gradientai/types/agents/api_key_list_response.py +++ b/src/gradientai/types/agents/api_key_list_response.py @@ -12,10 +12,7 @@ class APIKeyListResponse(BaseModel): api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None - """Api key infos""" links: Optional[APILinks] = None - """Links to other pages""" meta: Optional[APIMeta] = None - """Meta information about the data set""" diff --git a/src/gradientai/types/agents/api_key_regenerate_response.py b/src/gradientai/types/agents/api_key_regenerate_response.py index 400140fb..ea2f761e 100644 --- a/src/gradientai/types/agents/api_key_regenerate_response.py +++ b/src/gradientai/types/agents/api_key_regenerate_response.py @@ -10,4 +10,3 @@ class APIKeyRegenerateResponse(BaseModel): api_key_info: Optional[APIAgentAPIKeyInfo] = None - """Agent API Key Info""" diff --git a/src/gradientai/types/agents/api_key_update_params.py b/src/gradientai/types/agents/api_key_update_params.py index ba997a2f..b49ebb38 100644 --- a/src/gradientai/types/agents/api_key_update_params.py +++ b/src/gradientai/types/agents/api_key_update_params.py @@ -13,10 +13,7 @@ class APIKeyUpdateParams(TypedDict, total=False): path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]] body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] - """Agent id""" body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] - """API key ID""" name: str - """Name""" diff --git a/src/gradientai/types/agents/api_key_update_response.py b/src/gradientai/types/agents/api_key_update_response.py index 56154b16..87442329 100644 --- a/src/gradientai/types/agents/api_key_update_response.py +++ 
b/src/gradientai/types/agents/api_key_update_response.py @@ -10,4 +10,3 @@ class APIKeyUpdateResponse(BaseModel): api_key_info: Optional[APIAgentAPIKeyInfo] = None - """Agent API Key Info""" diff --git a/src/gradientai/types/agents/api_link_knowledge_base_output.py b/src/gradientai/types/agents/api_link_knowledge_base_output.py index 2e7cec1e..a38f021b 100644 --- a/src/gradientai/types/agents/api_link_knowledge_base_output.py +++ b/src/gradientai/types/agents/api_link_knowledge_base_output.py @@ -11,7 +11,6 @@ class APILinkKnowledgeBaseOutput(BaseModel): agent: Optional["APIAgent"] = None - """An Agent""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/api_star_metric.py b/src/gradientai/types/agents/api_star_metric.py index 0d04dea9..c9ecc60a 100644 --- a/src/gradientai/types/agents/api_star_metric.py +++ b/src/gradientai/types/agents/api_star_metric.py @@ -12,12 +12,6 @@ class APIStarMetric(BaseModel): name: Optional[str] = None - success_threshold: Optional[float] = None - """ - The success threshold for the star metric. This is a value that the metric must - reach to be considered successful. - """ - success_threshold_pct: Optional[int] = None """ The success threshold for the star metric. This is a percentage value between 0 diff --git a/src/gradientai/types/agents/api_star_metric_param.py b/src/gradientai/types/agents/api_star_metric_param.py index 781fb2b1..5f7b2fd9 100644 --- a/src/gradientai/types/agents/api_star_metric_param.py +++ b/src/gradientai/types/agents/api_star_metric_param.py @@ -12,12 +12,6 @@ class APIStarMetricParam(TypedDict, total=False): name: str - success_threshold: float - """ - The success threshold for the star metric. This is a value that the metric must - reach to be considered successful. - """ - success_threshold_pct: int """ The success threshold for the star metric. 
This is a percentage value between 0 diff --git a/src/gradientai/types/agents/chat/completion_create_params.py b/src/gradientai/types/agents/chat/completion_create_params.py index aaec2ba5..ec5c6b70 100644 --- a/src/gradientai/types/agents/chat/completion_create_params.py +++ b/src/gradientai/types/agents/chat/completion_create_params.py @@ -12,15 +12,7 @@ "MessageChatCompletionRequestDeveloperMessage", "MessageChatCompletionRequestUserMessage", "MessageChatCompletionRequestAssistantMessage", - "MessageChatCompletionRequestAssistantMessageToolCall", - "MessageChatCompletionRequestAssistantMessageToolCallFunction", - "MessageChatCompletionRequestToolMessage", "StreamOptions", - "ToolChoice", - "ToolChoiceChatCompletionNamedToolChoice", - "ToolChoiceChatCompletionNamedToolChoiceFunction", - "Tool", - "ToolFunction", "CompletionCreateParamsNonStreaming", "CompletionCreateParamsStreaming", ] @@ -113,25 +105,6 @@ class CompletionCreateParamsBase(TypedDict, total=False): this or `top_p` but not both. """ - tool_choice: ToolChoice - """ - Controls which (if any) tool is called by the model. `none` means the model will - not call any tool and instead generates a message. `auto` means the model can - pick between generating a message or calling one or more tools. `required` means - the model must call one or more tools. Specifying a particular tool via - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools - are present. - """ - - tools: Iterable[Tool] - """A list of tools the model may call. - - Currently, only functions are supported as a tool. 
- """ - top_logprobs: Optional[int] """ An integer between 0 and 20 specifying the number of most likely tokens to @@ -179,30 +152,6 @@ class MessageChatCompletionRequestUserMessage(TypedDict, total=False): """The role of the messages author, in this case `user`.""" -class MessageChatCompletionRequestAssistantMessageToolCallFunction(TypedDict, total=False): - arguments: Required[str] - """ - The arguments to call the function with, as generated by the model in JSON - format. Note that the model does not always generate valid JSON, and may - hallucinate parameters not defined by your function schema. Validate the - arguments in your code before calling your function. - """ - - name: Required[str] - """The name of the function to call.""" - - -class MessageChatCompletionRequestAssistantMessageToolCall(TypedDict, total=False): - id: Required[str] - """The ID of the tool call.""" - - function: Required[MessageChatCompletionRequestAssistantMessageToolCallFunction] - """The function that the model called.""" - - type: Required[Literal["function"]] - """The type of the tool. 
Currently, only `function` is supported.""" - - class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" @@ -210,27 +159,12 @@ class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): content: Union[str, List[str], None] """The contents of the assistant message.""" - tool_calls: Iterable[MessageChatCompletionRequestAssistantMessageToolCall] - """The tool calls generated by the model, such as function calls.""" - - -class MessageChatCompletionRequestToolMessage(TypedDict, total=False): - content: Required[str] - """The contents of the tool message.""" - - role: Required[Literal["tool"]] - """The role of the messages author, in this case `tool`.""" - - tool_call_id: Required[str] - """Tool call that this message is responding to.""" - Message: TypeAlias = Union[ MessageChatCompletionRequestSystemMessage, MessageChatCompletionRequestDeveloperMessage, MessageChatCompletionRequestUserMessage, MessageChatCompletionRequestAssistantMessage, - MessageChatCompletionRequestToolMessage, ] @@ -247,53 +181,6 @@ class StreamOptions(TypedDict, total=False): """ -class ToolChoiceChatCompletionNamedToolChoiceFunction(TypedDict, total=False): - name: Required[str] - """The name of the function to call.""" - - -class ToolChoiceChatCompletionNamedToolChoice(TypedDict, total=False): - function: Required[ToolChoiceChatCompletionNamedToolChoiceFunction] - - type: Required[Literal["function"]] - """The type of the tool. Currently, only `function` is supported.""" - - -ToolChoice: TypeAlias = Union[Literal["none", "auto", "required"], ToolChoiceChatCompletionNamedToolChoice] - - -class ToolFunction(TypedDict, total=False): - name: Required[str] - """The name of the function to be called. - - Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length - of 64. 
- """ - - description: str - """ - A description of what the function does, used by the model to choose when and - how to call the function. - """ - - parameters: Dict[str, object] - """The parameters the functions accepts, described as a JSON Schema object. - - See the [guide](/docs/guides/function-calling) for examples, and the - [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for - documentation about the format. - - Omitting `parameters` defines a function with an empty parameter list. - """ - - -class Tool(TypedDict, total=False): - function: Required[ToolFunction] - - type: Required[Literal["function"]] - """The type of the tool. Currently, only `function` is supported.""" - - class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False): stream: Optional[Literal[False]] """ diff --git a/src/gradientai/types/agents/chat/completion_create_response.py b/src/gradientai/types/agents/chat/completion_create_response.py index 4c839ded..f2860c31 100644 --- a/src/gradientai/types/agents/chat/completion_create_response.py +++ b/src/gradientai/types/agents/chat/completion_create_response.py @@ -4,17 +4,9 @@ from typing_extensions import Literal from ...._models import BaseModel -from ...shared.completion_usage import CompletionUsage from ...shared.chat_completion_token_logprob import ChatCompletionTokenLogprob -__all__ = [ - "CompletionCreateResponse", - "Choice", - "ChoiceLogprobs", - "ChoiceMessage", - "ChoiceMessageToolCall", - "ChoiceMessageToolCallFunction", -] +__all__ = ["CompletionCreateResponse", "Choice", "ChoiceLogprobs", "ChoiceMessage", "Usage"] class ChoiceLogprobs(BaseModel): @@ -25,30 +17,6 @@ class ChoiceLogprobs(BaseModel): """A list of message refusal tokens with log probability information.""" -class ChoiceMessageToolCallFunction(BaseModel): - arguments: str - """ - The arguments to call the function with, as generated by the model in JSON - format. 
Note that the model does not always generate valid JSON, and may - hallucinate parameters not defined by your function schema. Validate the - arguments in your code before calling your function. - """ - - name: str - """The name of the function to call.""" - - -class ChoiceMessageToolCall(BaseModel): - id: str - """The ID of the tool call.""" - - function: ChoiceMessageToolCallFunction - """The function that the model called.""" - - type: Literal["function"] - """The type of the tool. Currently, only `function` is supported.""" - - class ChoiceMessage(BaseModel): content: Optional[str] = None """The contents of the message.""" @@ -59,17 +27,14 @@ class ChoiceMessage(BaseModel): role: Literal["assistant"] """The role of the author of this message.""" - tool_calls: Optional[List[ChoiceMessageToolCall]] = None - """The tool calls generated by the model, such as function calls.""" - class Choice(BaseModel): - finish_reason: Literal["stop", "length", "tool_calls"] + finish_reason: Literal["stop", "length"] """The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, or `length` if the maximum number of tokens specified in the request - was reached, `tool_calls` if the model called a tool. + was reached. 
""" index: int @@ -82,6 +47,17 @@ class Choice(BaseModel): """A chat completion message generated by the model.""" +class Usage(BaseModel): + completion_tokens: int + """Number of tokens in the generated completion.""" + + prompt_tokens: int + """Number of tokens in the prompt.""" + + total_tokens: int + """Total number of tokens used in the request (prompt + completion).""" + + class CompletionCreateResponse(BaseModel): id: str """A unique identifier for the chat completion.""" @@ -101,5 +77,5 @@ class CompletionCreateResponse(BaseModel): object: Literal["chat.completion"] """The object type, which is always `chat.completion`.""" - usage: Optional[CompletionUsage] = None + usage: Optional[Usage] = None """Usage statistics for the completion request.""" diff --git a/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py index 9a4000c0..6aa6d27a 100644 --- a/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py +++ b/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py @@ -15,7 +15,6 @@ class EvaluationDatasetCreateFileUploadPresignedURLsParams(TypedDict, total=Fals class File(TypedDict, total=False): file_name: str - """Local filename""" file_size: str """The size of the file in bytes.""" diff --git a/src/gradientai/types/agents/evaluation_metric_list_regions_params.py b/src/gradientai/types/agents/evaluation_metric_list_regions_params.py deleted file mode 100644 index 701e7d4e..00000000 --- a/src/gradientai/types/agents/evaluation_metric_list_regions_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["EvaluationMetricListRegionsParams"] - - -class EvaluationMetricListRegionsParams(TypedDict, total=False): - serves_batch: bool - """Include datacenters that are capable of running batch jobs.""" - - serves_inference: bool - """Include datacenters that serve inference.""" diff --git a/src/gradientai/types/agents/evaluation_metric_list_regions_response.py b/src/gradientai/types/agents/evaluation_metric_list_regions_response.py deleted file mode 100644 index 7246d484..00000000 --- a/src/gradientai/types/agents/evaluation_metric_list_regions_response.py +++ /dev/null @@ -1,29 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from ..._models import BaseModel - -__all__ = ["EvaluationMetricListRegionsResponse", "Region"] - - -class Region(BaseModel): - inference_url: Optional[str] = None - """Url for inference server""" - - region: Optional[str] = None - """Region code""" - - serves_batch: Optional[bool] = None - """This datacenter is capable of running batch jobs""" - - serves_inference: Optional[bool] = None - """This datacenter is capable of serving inference""" - - stream_inference_url: Optional[str] = None - """The url for the inference streaming server""" - - -class EvaluationMetricListRegionsResponse(BaseModel): - regions: Optional[List[Region]] = None - """Region code""" diff --git a/src/gradientai/types/agents/evaluation_metrics/__init__.py b/src/gradientai/types/agents/evaluation_metrics/__init__.py index c349624b..7af9b074 100644 --- a/src/gradientai/types/agents/evaluation_metrics/__init__.py +++ b/src/gradientai/types/agents/evaluation_metrics/__init__.py @@ -2,8 +2,6 @@ from __future__ import annotations -from .model_list_params import ModelListParams as ModelListParams -from .model_list_response import ModelListResponse as ModelListResponse from .workspace_create_params import 
WorkspaceCreateParams as WorkspaceCreateParams from .workspace_list_response import WorkspaceListResponse as WorkspaceListResponse from .workspace_update_params import WorkspaceUpdateParams as WorkspaceUpdateParams diff --git a/src/gradientai/types/agents/evaluation_metrics/model_list_response.py b/src/gradientai/types/agents/evaluation_metrics/model_list_response.py deleted file mode 100644 index 2fc17524..00000000 --- a/src/gradientai/types/agents/evaluation_metrics/model_list_response.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from ...._models import BaseModel -from ...api_model import APIModel -from ...shared.api_meta import APIMeta -from ...shared.api_links import APILinks - -__all__ = ["ModelListResponse"] - - -class ModelListResponse(BaseModel): - links: Optional[APILinks] = None - """Links to other pages""" - - meta: Optional[APIMeta] = None - """Meta information about the data set""" - - models: Optional[List[APIModel]] = None - """The models""" diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py b/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py index 7a418e81..73f390be 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py @@ -10,10 +10,7 @@ class WorkspaceCreateParams(TypedDict, total=False): agent_uuids: List[str] - """Ids of the agents(s) to attach to the workspace""" description: str - """Description of the workspace""" name: str - """Name of the workspace""" diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py index 3e094515..1fe7b5a2 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py +++ 
b/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py @@ -9,4 +9,3 @@ class WorkspaceDeleteResponse(BaseModel): workspace_uuid: Optional[str] = None - """Workspace""" diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py index 793623dd..64f9a63c 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py @@ -11,7 +11,6 @@ class WorkspaceListResponse(BaseModel): workspaces: Optional[List["APIWorkspace"]] = None - """Workspaces""" from ...api_workspace import APIWorkspace diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py b/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py index d5906bd9..fd09079e 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py @@ -11,10 +11,8 @@ class WorkspaceUpdateParams(TypedDict, total=False): description: str - """The new description of the workspace""" name: str - """The new name of the workspace""" body_workspace_uuid: Annotated[str, PropertyInfo(alias="workspace_uuid")] """Workspace UUID.""" diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py index b56d0395..277274ed 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py @@ -2,17 +2,25 @@ from __future__ import annotations +from typing import List from typing_extensions import TypedDict -__all__ = ["AgentListParams"] +__all__ = ["AgentListParams", "FieldMask"] class AgentListParams(TypedDict, total=False): + field_mask: FieldMask + 
only_deployed: bool """Only list agents that are deployed.""" page: int - """Page number.""" + """page number.""" per_page: int - """Items per page.""" + """items per page.""" + + +class FieldMask(TypedDict, total=False): + paths: List[str] + """The set of field mask paths.""" diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py index 6f9ea948..1e520736 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py @@ -15,10 +15,8 @@ class AgentListResponse(BaseModel): agents: Optional[List["APIAgent"]] = None links: Optional[APILinks] = None - """Links to other pages""" meta: Optional[APIMeta] = None - """Meta information about the data set""" from ....api_agent import APIAgent diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py index 74e27dd2..8e92503a 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py @@ -12,7 +12,5 @@ class AgentMoveParams(TypedDict, total=False): agent_uuids: List[str] - """Agent uuids""" body_workspace_uuid: Annotated[str, PropertyInfo(alias="workspace_uuid")] - """Workspace uuid to move agents to""" diff --git a/src/gradientai/types/agents/evaluation_run_create_params.py b/src/gradientai/types/agents/evaluation_run_create_params.py index 3029e192..47bdabd6 100644 --- a/src/gradientai/types/agents/evaluation_run_create_params.py +++ b/src/gradientai/types/agents/evaluation_run_create_params.py @@ -16,4 +16,3 @@ class EvaluationRunCreateParams(TypedDict, total=False): """The name of the run.""" test_case_uuid: str - """Test-case UUID to run""" diff --git 
a/src/gradientai/types/agents/evaluation_run_list_results_params.py b/src/gradientai/types/agents/evaluation_run_list_results_params.py deleted file mode 100644 index bcf96c14..00000000 --- a/src/gradientai/types/agents/evaluation_run_list_results_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["EvaluationRunListResultsParams"] - - -class EvaluationRunListResultsParams(TypedDict, total=False): - page: int - """Page number.""" - - per_page: int - """Items per page.""" diff --git a/src/gradientai/types/agents/evaluation_run_list_results_response.py b/src/gradientai/types/agents/evaluation_run_list_results_response.py index df830a5b..f0a9882b 100644 --- a/src/gradientai/types/agents/evaluation_run_list_results_response.py +++ b/src/gradientai/types/agents/evaluation_run_list_results_response.py @@ -3,8 +3,6 @@ from typing import List, Optional from ..._models import BaseModel -from ..shared.api_meta import APIMeta -from ..shared.api_links import APILinks from .api_evaluation_run import APIEvaluationRun from .api_evaluation_prompt import APIEvaluationPrompt @@ -14,11 +12,5 @@ class EvaluationRunListResultsResponse(BaseModel): evaluation_run: Optional[APIEvaluationRun] = None - links: Optional[APILinks] = None - """Links to other pages""" - - meta: Optional[APIMeta] = None - """Meta information about the data set""" - prompts: Optional[List[APIEvaluationPrompt]] = None """The prompt level results.""" diff --git a/src/gradientai/types/agents/evaluation_test_case_list_response.py b/src/gradientai/types/agents/evaluation_test_case_list_response.py index 62b97961..ccfc263e 100644 --- a/src/gradientai/types/agents/evaluation_test_case_list_response.py +++ b/src/gradientai/types/agents/evaluation_test_case_list_response.py @@ -10,7 +10,3 @@ class EvaluationTestCaseListResponse(BaseModel): 
evaluation_test_cases: Optional[List[APIEvaluationTestCase]] = None - """ - Alternative way of authentication for internal usage only - should not be - exposed to public api - """ diff --git a/src/gradientai/types/agents/evaluation_test_case_update_params.py b/src/gradientai/types/agents/evaluation_test_case_update_params.py index 825f961b..be70fc95 100644 --- a/src/gradientai/types/agents/evaluation_test_case_update_params.py +++ b/src/gradientai/types/agents/evaluation_test_case_update_params.py @@ -26,7 +26,6 @@ class EvaluationTestCaseUpdateParams(TypedDict, total=False): star_metric: APIStarMetricParam body_test_case_uuid: Annotated[str, PropertyInfo(alias="test_case_uuid")] - """Test-case UUID to update""" class Metrics(TypedDict, total=False): diff --git a/src/gradientai/types/agents/function_create_params.py b/src/gradientai/types/agents/function_create_params.py index 000de32b..938fb1d5 100644 --- a/src/gradientai/types/agents/function_create_params.py +++ b/src/gradientai/types/agents/function_create_params.py @@ -11,22 +11,15 @@ class FunctionCreateParams(TypedDict, total=False): body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] - """Agent id""" description: str - """Function description""" faas_name: str - """The name of the function in the DigitalOcean functions platform""" faas_namespace: str - """The namespace of the function in the DigitalOcean functions platform""" function_name: str - """Function name""" input_schema: object - """Describe the input schema for the function so the agent may call it""" output_schema: object - """Describe the output schema for the function so the agent handle its response""" diff --git a/src/gradientai/types/agents/function_create_response.py b/src/gradientai/types/agents/function_create_response.py index 65a4bb2b..82ab984b 100644 --- a/src/gradientai/types/agents/function_create_response.py +++ b/src/gradientai/types/agents/function_create_response.py @@ -11,7 +11,6 @@ class 
FunctionCreateResponse(BaseModel): agent: Optional["APIAgent"] = None - """An Agent""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/function_delete_response.py b/src/gradientai/types/agents/function_delete_response.py index 26ad02e6..678ef62d 100644 --- a/src/gradientai/types/agents/function_delete_response.py +++ b/src/gradientai/types/agents/function_delete_response.py @@ -11,7 +11,6 @@ class FunctionDeleteResponse(BaseModel): agent: Optional["APIAgent"] = None - """An Agent""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/function_update_params.py b/src/gradientai/types/agents/function_update_params.py index 67c6ea9b..2fa8e8f0 100644 --- a/src/gradientai/types/agents/function_update_params.py +++ b/src/gradientai/types/agents/function_update_params.py @@ -13,25 +13,17 @@ class FunctionUpdateParams(TypedDict, total=False): path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]] body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] - """Agent id""" description: str - """Funciton description""" faas_name: str - """The name of the function in the DigitalOcean functions platform""" faas_namespace: str - """The namespace of the function in the DigitalOcean functions platform""" function_name: str - """Function name""" body_function_uuid: Annotated[str, PropertyInfo(alias="function_uuid")] - """Function id""" input_schema: object - """Describe the input schema for the function so the agent may call it""" output_schema: object - """Describe the output schema for the function so the agent handle its response""" diff --git a/src/gradientai/types/agents/function_update_response.py b/src/gradientai/types/agents/function_update_response.py index eebde3e6..82fc63be 100644 --- a/src/gradientai/types/agents/function_update_response.py +++ b/src/gradientai/types/agents/function_update_response.py @@ -11,7 +11,6 @@ class FunctionUpdateResponse(BaseModel): agent: Optional["APIAgent"] = None - 
"""An Agent""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/knowledge_base_detach_response.py b/src/gradientai/types/agents/knowledge_base_detach_response.py index 0dc90aaf..76bb4236 100644 --- a/src/gradientai/types/agents/knowledge_base_detach_response.py +++ b/src/gradientai/types/agents/knowledge_base_detach_response.py @@ -11,7 +11,6 @@ class KnowledgeBaseDetachResponse(BaseModel): agent: Optional["APIAgent"] = None - """An Agent""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/route_add_params.py b/src/gradientai/types/agents/route_add_params.py index d8dbeff8..b4fcb417 100644 --- a/src/gradientai/types/agents/route_add_params.py +++ b/src/gradientai/types/agents/route_add_params.py @@ -13,7 +13,6 @@ class RouteAddParams(TypedDict, total=False): path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] - """Routed agent id""" if_case: str @@ -21,4 +20,3 @@ class RouteAddParams(TypedDict, total=False): """A unique identifier for the parent agent.""" route_name: str - """Name of route""" diff --git a/src/gradientai/types/agents/route_add_response.py b/src/gradientai/types/agents/route_add_response.py index b9cc2b7d..cd3bb16a 100644 --- a/src/gradientai/types/agents/route_add_response.py +++ b/src/gradientai/types/agents/route_add_response.py @@ -9,7 +9,6 @@ class RouteAddResponse(BaseModel): child_agent_uuid: Optional[str] = None - """Routed agent id""" parent_agent_uuid: Optional[str] = None """A unique identifier for the parent agent.""" diff --git a/src/gradientai/types/agents/route_delete_response.py b/src/gradientai/types/agents/route_delete_response.py index b49c8b7c..07105a62 100644 --- a/src/gradientai/types/agents/route_delete_response.py +++ b/src/gradientai/types/agents/route_delete_response.py @@ -9,7 +9,5 @@ class RouteDeleteResponse(BaseModel): child_agent_uuid: Optional[str] = None 
- """Routed agent id""" parent_agent_uuid: Optional[str] = None - """Pagent agent id""" diff --git a/src/gradientai/types/agents/route_update_params.py b/src/gradientai/types/agents/route_update_params.py index 453a3b93..cb6d6391 100644 --- a/src/gradientai/types/agents/route_update_params.py +++ b/src/gradientai/types/agents/route_update_params.py @@ -13,16 +13,12 @@ class RouteUpdateParams(TypedDict, total=False): path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] - """Routed agent id""" if_case: str - """Describes the case in which the child agent should be used""" body_parent_agent_uuid: Annotated[str, PropertyInfo(alias="parent_agent_uuid")] """A unique identifier for the parent agent.""" route_name: str - """Route name""" uuid: str - """Unique id of linkage""" diff --git a/src/gradientai/types/agents/route_update_response.py b/src/gradientai/types/agents/route_update_response.py index b79fc9fe..75e1eda5 100644 --- a/src/gradientai/types/agents/route_update_response.py +++ b/src/gradientai/types/agents/route_update_response.py @@ -9,7 +9,6 @@ class RouteUpdateResponse(BaseModel): child_agent_uuid: Optional[str] = None - """Routed agent id""" parent_agent_uuid: Optional[str] = None """A unique identifier for the parent agent.""" @@ -17,4 +16,3 @@ class RouteUpdateResponse(BaseModel): rollback: Optional[bool] = None uuid: Optional[str] = None - """Unique id of linkage""" diff --git a/src/gradientai/types/agents/route_view_response.py b/src/gradientai/types/agents/route_view_response.py index f0ee2d71..dd9af70b 100644 --- a/src/gradientai/types/agents/route_view_response.py +++ b/src/gradientai/types/agents/route_view_response.py @@ -11,7 +11,6 @@ class RouteViewResponse(BaseModel): children: Optional[List["APIAgent"]] = None - """Child agents""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/version_list_params.py 
b/src/gradientai/types/agents/version_list_params.py index e8fa2f6d..a71fd022 100644 --- a/src/gradientai/types/agents/version_list_params.py +++ b/src/gradientai/types/agents/version_list_params.py @@ -9,7 +9,7 @@ class VersionListParams(TypedDict, total=False): page: int - """Page number.""" + """page number.""" per_page: int - """Items per page.""" + """items per page.""" diff --git a/src/gradientai/types/agents/version_list_response.py b/src/gradientai/types/agents/version_list_response.py index c35a5ba4..af25150e 100644 --- a/src/gradientai/types/agents/version_list_response.py +++ b/src/gradientai/types/agents/version_list_response.py @@ -22,146 +22,97 @@ class AgentVersionAttachedChildAgent(BaseModel): agent_name: Optional[str] = None - """Name of the child agent""" child_agent_uuid: Optional[str] = None - """Child agent unique identifier""" if_case: Optional[str] = None - """If case""" is_deleted: Optional[bool] = None - """Child agent is deleted""" route_name: Optional[str] = None - """Route name""" class AgentVersionAttachedFunction(BaseModel): description: Optional[str] = None - """Description of the function""" faas_name: Optional[str] = None - """FaaS name of the function""" faas_namespace: Optional[str] = None - """FaaS namespace of the function""" is_deleted: Optional[bool] = None - """Whether the function is deleted""" name: Optional[str] = None - """Name of the function""" class AgentVersionAttachedGuardrail(BaseModel): is_deleted: Optional[bool] = None - """Whether the guardrail is deleted""" name: Optional[str] = None - """Guardrail Name""" priority: Optional[int] = None - """Guardrail Priority""" uuid: Optional[str] = None - """Guardrail UUID""" class AgentVersionAttachedKnowledgebase(BaseModel): is_deleted: Optional[bool] = None - """Deletet at date / time""" name: Optional[str] = None - """Name of the knowledge base""" uuid: Optional[str] = None - """Unique id of the knowledge base""" class AgentVersion(BaseModel): id: Optional[str] = None - 
"""Unique identifier""" agent_uuid: Optional[str] = None - """Uuid of the agent this version belongs to""" attached_child_agents: Optional[List[AgentVersionAttachedChildAgent]] = None - """List of child agent relationships""" attached_functions: Optional[List[AgentVersionAttachedFunction]] = None - """List of function versions""" attached_guardrails: Optional[List[AgentVersionAttachedGuardrail]] = None - """List of guardrail version""" attached_knowledgebases: Optional[List[AgentVersionAttachedKnowledgebase]] = None - """List of knowledge base agent versions""" can_rollback: Optional[bool] = None - """Whether the version is able to be rolled back to""" created_at: Optional[datetime] = None - """Creation date""" created_by_email: Optional[str] = None - """User who created this version""" currently_applied: Optional[bool] = None - """Whether this is the currently applied configuration""" description: Optional[str] = None - """Description of the agent""" instruction: Optional[str] = None - """Instruction for the agent""" k: Optional[int] = None - """K value for the agent's configuration""" max_tokens: Optional[int] = None - """Max tokens setting for the agent""" - model: Optional[str] = FieldInfo(alias="model_name", default=None) - """Name of model associated to the agent version""" + api_model_name: Optional[str] = FieldInfo(alias="model_name", default=None) name: Optional[str] = None - """Name of the agent""" provide_citations: Optional[bool] = None - """Whether the agent should provide in-response citations""" retrieval_method: Optional[APIRetrievalMethod] = None - """ - - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown - - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite - - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back - - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries - - RETRIEVAL_METHOD_NONE: The retrieval method is none - """ tags: Optional[List[str]] = None - """Tags associated with the agent""" 
temperature: Optional[float] = None - """Temperature setting for the agent""" top_p: Optional[float] = None - """Top_p setting for the agent""" trigger_action: Optional[str] = None - """Action triggering the configuration update""" version_hash: Optional[str] = None - """Version hash""" class VersionListResponse(BaseModel): agent_versions: Optional[List[AgentVersion]] = None - """Agents""" links: Optional[APILinks] = None - """Links to other pages""" meta: Optional[APIMeta] = None - """Meta information about the data set""" diff --git a/src/gradientai/types/agents/version_update_params.py b/src/gradientai/types/agents/version_update_params.py index 212eb05c..d7fb01cb 100644 --- a/src/gradientai/types/agents/version_update_params.py +++ b/src/gradientai/types/agents/version_update_params.py @@ -11,7 +11,5 @@ class VersionUpdateParams(TypedDict, total=False): body_uuid: Annotated[str, PropertyInfo(alias="uuid")] - """Agent unique identifier""" version_hash: str - """Unique identifier""" diff --git a/src/gradientai/types/agents/version_update_response.py b/src/gradientai/types/agents/version_update_response.py index 464ef12f..72058319 100644 --- a/src/gradientai/types/agents/version_update_response.py +++ b/src/gradientai/types/agents/version_update_response.py @@ -28,4 +28,3 @@ class VersionUpdateResponse(BaseModel): """An alternative way to provide auth information. 
for internal use only.""" version_hash: Optional[str] = None - """Unique identifier""" diff --git a/src/gradientai/types/api_agent.py b/src/gradientai/types/api_agent.py index 4be22aa5..1378950a 100644 --- a/src/gradientai/types/api_agent.py +++ b/src/gradientai/types/api_agent.py @@ -23,7 +23,6 @@ "Deployment", "Function", "Guardrail", - "LoggingConfig", "Template", "TemplateGuardrail", ] @@ -31,7 +30,6 @@ class APIKey(BaseModel): api_key: Optional[str] = None - """Api key""" class Chatbot(BaseModel): @@ -40,7 +38,6 @@ class Chatbot(BaseModel): logo: Optional[str] = None name: Optional[str] = None - """Name of chatbot""" primary_color: Optional[str] = None @@ -51,15 +48,12 @@ class Chatbot(BaseModel): class ChatbotIdentifier(BaseModel): agent_chatbot_identifier: Optional[str] = None - """Agent chatbot identifier""" class Deployment(BaseModel): created_at: Optional[datetime] = None - """Creation date / time""" name: Optional[str] = None - """Name""" status: Optional[ Literal[ @@ -76,39 +70,22 @@ class Deployment(BaseModel): ] = None updated_at: Optional[datetime] = None - """Last modified""" url: Optional[str] = None - """Access your deployed agent here""" uuid: Optional[str] = None - """Unique id""" visibility: Optional[APIDeploymentVisibility] = None - """ - - VISIBILITY_UNKNOWN: The status of the deployment is unknown - - VISIBILITY_DISABLED: The deployment is disabled and will no longer service - requests - - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state - - VISIBILITY_PUBLIC: The deployment is public and will service requests from the - public internet - - VISIBILITY_PRIVATE: The deployment is private and will only service requests - from other agents, or through API keys - """ class Function(BaseModel): api_key: Optional[str] = None - """Api key""" created_at: Optional[datetime] = None - """Creation date / time""" created_by: Optional[str] = None - """Created by user id from DO""" description: Optional[str] = None - """Agent description""" 
faas_name: Optional[str] = None @@ -117,18 +94,14 @@ class Function(BaseModel): input_schema: Optional[object] = None name: Optional[str] = None - """Name""" output_schema: Optional[object] = None updated_at: Optional[datetime] = None - """Last modified""" url: Optional[str] = None - """Download your agent here""" uuid: Optional[str] = None - """Unique id""" class Guardrail(BaseModel): @@ -166,122 +139,72 @@ class Guardrail(BaseModel): uuid: Optional[str] = None -class LoggingConfig(BaseModel): - galileo_project_id: Optional[str] = None - """Galileo project identifier""" - - galileo_project_name: Optional[str] = None - """Name of the Galileo project""" - - log_stream_id: Optional[str] = None - """Identifier for the log stream""" - - log_stream_name: Optional[str] = None - """Name of the log stream""" - - class TemplateGuardrail(BaseModel): priority: Optional[int] = None - """Priority of the guardrail""" uuid: Optional[str] = None - """Uuid of the guardrail""" class Template(BaseModel): created_at: Optional[datetime] = None - """The agent template's creation date""" description: Optional[str] = None - """Deprecated - Use summary instead""" guardrails: Optional[List[TemplateGuardrail]] = None - """List of guardrails associated with the agent template""" instruction: Optional[str] = None - """Instructions for the agent template""" k: Optional[int] = None - """The 'k' value for the agent template""" knowledge_bases: Optional[List[APIKnowledgeBase]] = None - """List of knowledge bases associated with the agent template""" long_description: Optional[str] = None - """The long description of the agent template""" max_tokens: Optional[int] = None - """The max_tokens setting for the agent template""" model: Optional[APIAgentModel] = None - """Description of a Model""" name: Optional[str] = None - """Name of the agent template""" short_description: Optional[str] = None - """The short description of the agent template""" summary: Optional[str] = None - """The summary of the 
agent template""" tags: Optional[List[str]] = None - """List of tags associated with the agent template""" temperature: Optional[float] = None - """The temperature setting for the agent template""" template_type: Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None - """ - - AGENT_TEMPLATE_TYPE_STANDARD: The standard agent template - - AGENT_TEMPLATE_TYPE_ONE_CLICK: The one click agent template - """ top_p: Optional[float] = None - """The top_p setting for the agent template""" updated_at: Optional[datetime] = None - """The agent template's last updated date""" uuid: Optional[str] = None - """Unique id""" class APIAgent(BaseModel): anthropic_api_key: Optional[APIAnthropicAPIKeyInfo] = None - """Anthropic API Key Info""" api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None - """Api key infos""" api_keys: Optional[List[APIKey]] = None - """Api keys""" chatbot: Optional[Chatbot] = None - """A Chatbot""" chatbot_identifiers: Optional[List[ChatbotIdentifier]] = None - """Chatbot identifiers""" child_agents: Optional[List["APIAgent"]] = None - """Child agents""" - - conversation_logs_enabled: Optional[bool] = None - """Whether conversation logs are enabled for the agent""" created_at: Optional[datetime] = None - """Creation date / time""" deployment: Optional[Deployment] = None - """Description of deployment""" description: Optional[str] = None - """Description of agent""" functions: Optional[List[Function]] = None guardrails: Optional[List[Guardrail]] = None - """The guardrails the agent is attached to""" if_case: Optional[str] = None @@ -296,75 +219,48 @@ class APIAgent(BaseModel): k: Optional[int] = None knowledge_bases: Optional[List[APIKnowledgeBase]] = None - """Knowledge bases""" - - logging_config: Optional[LoggingConfig] = None max_tokens: Optional[int] = None model: Optional[APIAgentModel] = None - """Description of a Model""" name: Optional[str] = None - """Agent name""" openai_api_key: Optional[APIOpenAIAPIKeyInfo] = 
None - """OpenAI API Key Info""" parent_agents: Optional[List["APIAgent"]] = None - """Parent agents""" project_id: Optional[str] = None provide_citations: Optional[bool] = None - """Whether the agent should provide in-response citations""" region: Optional[str] = None - """Region code""" retrieval_method: Optional[APIRetrievalMethod] = None - """ - - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown - - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite - - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back - - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries - - RETRIEVAL_METHOD_NONE: The retrieval method is none - """ route_created_at: Optional[datetime] = None - """Creation of route date / time""" route_created_by: Optional[str] = None route_name: Optional[str] = None - """Route name""" route_uuid: Optional[str] = None tags: Optional[List[str]] = None - """Agent tag to organize related resources""" temperature: Optional[float] = None template: Optional[Template] = None - """Represents an AgentTemplate entity""" top_p: Optional[float] = None updated_at: Optional[datetime] = None - """Last modified""" url: Optional[str] = None - """Access your agent under this url""" user_id: Optional[str] = None - """Id of user that created the agent""" uuid: Optional[str] = None - """Unique agent id""" - - version_hash: Optional[str] = None - """The latest version of the agent""" workspace: Optional["APIWorkspace"] = None diff --git a/src/gradientai/types/api_agent_api_key_info.py b/src/gradientai/types/api_agent_api_key_info.py index 7222153c..8dc71564 100644 --- a/src/gradientai/types/api_agent_api_key_info.py +++ b/src/gradientai/types/api_agent_api_key_info.py @@ -10,18 +10,13 @@ class APIAgentAPIKeyInfo(BaseModel): created_at: Optional[datetime] = None - """Creation date""" created_by: Optional[str] = None - """Created by""" deleted_at: Optional[datetime] = None - """Deleted date""" name: Optional[str] = None - """Name""" 
secret_key: Optional[str] = None uuid: Optional[str] = None - """Uuid""" diff --git a/src/gradientai/types/api_agent_model.py b/src/gradientai/types/api_agent_model.py index f111bfb7..1025321b 100644 --- a/src/gradientai/types/api_agent_model.py +++ b/src/gradientai/types/api_agent_model.py @@ -13,41 +13,30 @@ class APIAgentModel(BaseModel): agreement: Optional[APIAgreement] = None - """Agreement Description""" created_at: Optional[datetime] = None - """Creation date / time""" inference_name: Optional[str] = None - """Internally used name""" inference_version: Optional[str] = None - """Internally used version""" is_foundational: Optional[bool] = None - """True if it is a foundational model provided by do""" metadata: Optional[object] = None - """Additional meta data""" name: Optional[str] = None - """Name of the model""" parent_uuid: Optional[str] = None - """Unique id of the model, this model is based on""" provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( None ) updated_at: Optional[datetime] = None - """Last modified""" upload_complete: Optional[bool] = None - """Model has been fully uploaded""" url: Optional[str] = None - """Download url""" usecases: Optional[ List[ @@ -62,10 +51,7 @@ class APIAgentModel(BaseModel): ] ] ] = None - """Usecases of the model""" uuid: Optional[str] = None - """Unique id""" version: Optional[APIModelVersion] = None - """Version Information about a Model""" diff --git a/src/gradientai/types/api_anthropic_api_key_info.py b/src/gradientai/types/api_anthropic_api_key_info.py index 6440c5ef..e2e04a8a 100644 --- a/src/gradientai/types/api_anthropic_api_key_info.py +++ b/src/gradientai/types/api_anthropic_api_key_info.py @@ -10,19 +10,13 @@ class APIAnthropicAPIKeyInfo(BaseModel): created_at: Optional[datetime] = None - """Key creation date""" created_by: Optional[str] = None - """Created by user id from DO""" deleted_at: Optional[datetime] = None - """Key deleted date""" 
name: Optional[str] = None - """Name""" updated_at: Optional[datetime] = None - """Key last updated date""" uuid: Optional[str] = None - """Uuid""" diff --git a/src/gradientai/types/api_knowledge_base.py b/src/gradientai/types/api_knowledge_base.py index 4e4a6567..2b0676f0 100644 --- a/src/gradientai/types/api_knowledge_base.py +++ b/src/gradientai/types/api_knowledge_base.py @@ -11,37 +11,27 @@ class APIKnowledgeBase(BaseModel): added_to_agent_at: Optional[datetime] = None - """Time when the knowledge base was added to the agent""" created_at: Optional[datetime] = None - """Creation date / time""" database_id: Optional[str] = None embedding_model_uuid: Optional[str] = None is_public: Optional[bool] = None - """Whether the knowledge base is public or not""" last_indexing_job: Optional[APIIndexingJob] = None - """IndexingJob description""" name: Optional[str] = None - """Name of knowledge base""" project_id: Optional[str] = None region: Optional[str] = None - """Region code""" tags: Optional[List[str]] = None - """Tags to organize related resources""" updated_at: Optional[datetime] = None - """Last modified""" user_id: Optional[str] = None - """Id of user that created the knowledge base""" uuid: Optional[str] = None - """Unique id for knowledge base""" diff --git a/src/gradientai/types/api_model.py b/src/gradientai/types/api_model.py index 7c530ee2..c2bc1edd 100644 --- a/src/gradientai/types/api_model.py +++ b/src/gradientai/types/api_model.py @@ -12,31 +12,21 @@ class APIModel(BaseModel): agreement: Optional[APIAgreement] = None - """Agreement Description""" created_at: Optional[datetime] = None - """Creation date / time""" is_foundational: Optional[bool] = None - """True if it is a foundational model provided by do""" name: Optional[str] = None - """Name of the model""" parent_uuid: Optional[str] = None - """Unique id of the model, this model is based on""" updated_at: Optional[datetime] = None - """Last modified""" upload_complete: Optional[bool] = None - 
"""Model has been fully uploaded""" url: Optional[str] = None - """Download url""" uuid: Optional[str] = None - """Unique id""" version: Optional[APIModelVersion] = None - """Version Information about a Model""" diff --git a/src/gradientai/types/api_model_version.py b/src/gradientai/types/api_model_version.py index f19a78c6..2e118632 100644 --- a/src/gradientai/types/api_model_version.py +++ b/src/gradientai/types/api_model_version.py @@ -9,10 +9,7 @@ class APIModelVersion(BaseModel): major: Optional[int] = None - """Major version number""" minor: Optional[int] = None - """Minor version number""" patch: Optional[int] = None - """Patch version number""" diff --git a/src/gradientai/types/api_openai_api_key_info.py b/src/gradientai/types/api_openai_api_key_info.py index bcee992b..7467cfc2 100644 --- a/src/gradientai/types/api_openai_api_key_info.py +++ b/src/gradientai/types/api_openai_api_key_info.py @@ -11,22 +11,15 @@ class APIOpenAIAPIKeyInfo(BaseModel): created_at: Optional[datetime] = None - """Key creation date""" created_by: Optional[str] = None - """Created by user id from DO""" deleted_at: Optional[datetime] = None - """Key deleted date""" models: Optional[List[APIAgentModel]] = None - """Models supported by the openAI api key""" name: Optional[str] = None - """Name""" updated_at: Optional[datetime] = None - """Key last updated date""" uuid: Optional[str] = None - """Uuid""" diff --git a/src/gradientai/types/api_workspace.py b/src/gradientai/types/api_workspace.py index 564fabb6..83e59379 100644 --- a/src/gradientai/types/api_workspace.py +++ b/src/gradientai/types/api_workspace.py @@ -13,34 +13,24 @@ class APIWorkspace(BaseModel): agents: Optional[List["APIAgent"]] = None - """Agents""" created_at: Optional[datetime] = None - """Creation date""" created_by: Optional[str] = None - """The id of user who created this workspace""" created_by_email: Optional[str] = None - """The email of the user who created this workspace""" deleted_at: Optional[datetime] = 
None - """Deleted date""" description: Optional[str] = None - """Description of the workspace""" evaluation_test_cases: Optional[List[APIEvaluationTestCase]] = None - """Evaluations""" name: Optional[str] = None - """Name of the workspace""" updated_at: Optional[datetime] = None - """Update date""" uuid: Optional[str] = None - """Unique id""" from .api_agent import APIAgent diff --git a/src/gradientai/types/chat/completion_create_params.py b/src/gradientai/types/chat/completion_create_params.py index aaec2ba5..ec5c6b70 100644 --- a/src/gradientai/types/chat/completion_create_params.py +++ b/src/gradientai/types/chat/completion_create_params.py @@ -12,15 +12,7 @@ "MessageChatCompletionRequestDeveloperMessage", "MessageChatCompletionRequestUserMessage", "MessageChatCompletionRequestAssistantMessage", - "MessageChatCompletionRequestAssistantMessageToolCall", - "MessageChatCompletionRequestAssistantMessageToolCallFunction", - "MessageChatCompletionRequestToolMessage", "StreamOptions", - "ToolChoice", - "ToolChoiceChatCompletionNamedToolChoice", - "ToolChoiceChatCompletionNamedToolChoiceFunction", - "Tool", - "ToolFunction", "CompletionCreateParamsNonStreaming", "CompletionCreateParamsStreaming", ] @@ -113,25 +105,6 @@ class CompletionCreateParamsBase(TypedDict, total=False): this or `top_p` but not both. """ - tool_choice: ToolChoice - """ - Controls which (if any) tool is called by the model. `none` means the model will - not call any tool and instead generates a message. `auto` means the model can - pick between generating a message or calling one or more tools. `required` means - the model must call one or more tools. Specifying a particular tool via - `{"type": "function", "function": {"name": "my_function"}}` forces the model to - call that tool. - - `none` is the default when no tools are present. `auto` is the default if tools - are present. - """ - - tools: Iterable[Tool] - """A list of tools the model may call. 
- - Currently, only functions are supported as a tool. - """ - top_logprobs: Optional[int] """ An integer between 0 and 20 specifying the number of most likely tokens to @@ -179,30 +152,6 @@ class MessageChatCompletionRequestUserMessage(TypedDict, total=False): """The role of the messages author, in this case `user`.""" -class MessageChatCompletionRequestAssistantMessageToolCallFunction(TypedDict, total=False): - arguments: Required[str] - """ - The arguments to call the function with, as generated by the model in JSON - format. Note that the model does not always generate valid JSON, and may - hallucinate parameters not defined by your function schema. Validate the - arguments in your code before calling your function. - """ - - name: Required[str] - """The name of the function to call.""" - - -class MessageChatCompletionRequestAssistantMessageToolCall(TypedDict, total=False): - id: Required[str] - """The ID of the tool call.""" - - function: Required[MessageChatCompletionRequestAssistantMessageToolCallFunction] - """The function that the model called.""" - - type: Required[Literal["function"]] - """The type of the tool. 
Currently, only `function` is supported.""" - - class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" @@ -210,27 +159,12 @@ class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): content: Union[str, List[str], None] """The contents of the assistant message.""" - tool_calls: Iterable[MessageChatCompletionRequestAssistantMessageToolCall] - """The tool calls generated by the model, such as function calls.""" - - -class MessageChatCompletionRequestToolMessage(TypedDict, total=False): - content: Required[str] - """The contents of the tool message.""" - - role: Required[Literal["tool"]] - """The role of the messages author, in this case `tool`.""" - - tool_call_id: Required[str] - """Tool call that this message is responding to.""" - Message: TypeAlias = Union[ MessageChatCompletionRequestSystemMessage, MessageChatCompletionRequestDeveloperMessage, MessageChatCompletionRequestUserMessage, MessageChatCompletionRequestAssistantMessage, - MessageChatCompletionRequestToolMessage, ] @@ -247,53 +181,6 @@ class StreamOptions(TypedDict, total=False): """ -class ToolChoiceChatCompletionNamedToolChoiceFunction(TypedDict, total=False): - name: Required[str] - """The name of the function to call.""" - - -class ToolChoiceChatCompletionNamedToolChoice(TypedDict, total=False): - function: Required[ToolChoiceChatCompletionNamedToolChoiceFunction] - - type: Required[Literal["function"]] - """The type of the tool. Currently, only `function` is supported.""" - - -ToolChoice: TypeAlias = Union[Literal["none", "auto", "required"], ToolChoiceChatCompletionNamedToolChoice] - - -class ToolFunction(TypedDict, total=False): - name: Required[str] - """The name of the function to be called. - - Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length - of 64. 
- """ - - description: str - """ - A description of what the function does, used by the model to choose when and - how to call the function. - """ - - parameters: Dict[str, object] - """The parameters the functions accepts, described as a JSON Schema object. - - See the [guide](/docs/guides/function-calling) for examples, and the - [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for - documentation about the format. - - Omitting `parameters` defines a function with an empty parameter list. - """ - - -class Tool(TypedDict, total=False): - function: Required[ToolFunction] - - type: Required[Literal["function"]] - """The type of the tool. Currently, only `function` is supported.""" - - class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False): stream: Optional[Literal[False]] """ diff --git a/src/gradientai/types/chat/completion_create_response.py b/src/gradientai/types/chat/completion_create_response.py index 73a09cf5..1791373b 100644 --- a/src/gradientai/types/chat/completion_create_response.py +++ b/src/gradientai/types/chat/completion_create_response.py @@ -4,17 +4,9 @@ from typing_extensions import Literal from ..._models import BaseModel -from ..shared.completion_usage import CompletionUsage from ..shared.chat_completion_token_logprob import ChatCompletionTokenLogprob -__all__ = [ - "CompletionCreateResponse", - "Choice", - "ChoiceLogprobs", - "ChoiceMessage", - "ChoiceMessageToolCall", - "ChoiceMessageToolCallFunction", -] +__all__ = ["CompletionCreateResponse", "Choice", "ChoiceLogprobs", "ChoiceMessage", "Usage"] class ChoiceLogprobs(BaseModel): @@ -25,30 +17,6 @@ class ChoiceLogprobs(BaseModel): """A list of message refusal tokens with log probability information.""" -class ChoiceMessageToolCallFunction(BaseModel): - arguments: str - """ - The arguments to call the function with, as generated by the model in JSON - format. 
Note that the model does not always generate valid JSON, and may - hallucinate parameters not defined by your function schema. Validate the - arguments in your code before calling your function. - """ - - name: str - """The name of the function to call.""" - - -class ChoiceMessageToolCall(BaseModel): - id: str - """The ID of the tool call.""" - - function: ChoiceMessageToolCallFunction - """The function that the model called.""" - - type: Literal["function"] - """The type of the tool. Currently, only `function` is supported.""" - - class ChoiceMessage(BaseModel): content: Optional[str] = None """The contents of the message.""" @@ -59,17 +27,14 @@ class ChoiceMessage(BaseModel): role: Literal["assistant"] """The role of the author of this message.""" - tool_calls: Optional[List[ChoiceMessageToolCall]] = None - """The tool calls generated by the model, such as function calls.""" - class Choice(BaseModel): - finish_reason: Literal["stop", "length", "tool_calls"] + finish_reason: Literal["stop", "length"] """The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, or `length` if the maximum number of tokens specified in the request - was reached, `tool_calls` if the model called a tool. + was reached. 
""" index: int @@ -82,6 +47,17 @@ class Choice(BaseModel): """A chat completion message generated by the model.""" +class Usage(BaseModel): + completion_tokens: int + """Number of tokens in the generated completion.""" + + prompt_tokens: int + """Number of tokens in the prompt.""" + + total_tokens: int + """Total number of tokens used in the request (prompt + completion).""" + + class CompletionCreateResponse(BaseModel): id: str """A unique identifier for the chat completion.""" @@ -101,5 +77,5 @@ class CompletionCreateResponse(BaseModel): object: Literal["chat.completion"] """The object type, which is always `chat.completion`.""" - usage: Optional[CompletionUsage] = None + usage: Optional[Usage] = None """Usage statistics for the completion request.""" diff --git a/src/gradientai/types/domains.py b/src/gradientai/types/domains.py deleted file mode 100644 index e5510bdc..00000000 --- a/src/gradientai/types/domains.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel - -__all__ = ["Domains"] - - -class Domains(BaseModel): - certificate_id: Optional[str] = None - """The ID of the TLS certificate used for SSL termination.""" - - is_managed: Optional[bool] = None - """A boolean value indicating if the domain is already managed by DigitalOcean. - - If true, all A and AAAA records required to enable Global load balancers will be - automatically added. - """ - - name: Optional[str] = None - """FQDN to associate with a Global load balancer.""" diff --git a/src/gradientai/types/domains_param.py b/src/gradientai/types/domains_param.py deleted file mode 100644 index d2d21faf..00000000 --- a/src/gradientai/types/domains_param.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["DomainsParam"] - - -class DomainsParam(TypedDict, total=False): - certificate_id: str - """The ID of the TLS certificate used for SSL termination.""" - - is_managed: bool - """A boolean value indicating if the domain is already managed by DigitalOcean. - - If true, all A and AAAA records required to enable Global load balancers will be - automatically added. - """ - - name: str - """FQDN to associate with a Global load balancer.""" diff --git a/src/gradientai/types/droplet_backup_policy.py b/src/gradientai/types/droplet_backup_policy.py deleted file mode 100644 index 63112e8f..00000000 --- a/src/gradientai/types/droplet_backup_policy.py +++ /dev/null @@ -1,28 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["DropletBackupPolicy"] - - -class DropletBackupPolicy(BaseModel): - hour: Optional[Literal[0, 4, 8, 12, 16, 20]] = None - """The hour of the day that the backup window will start.""" - - plan: Optional[Literal["daily", "weekly"]] = None - """The backup plan used for the Droplet. - - The plan can be either `daily` or `weekly`. - """ - - retention_period_days: Optional[int] = None - """The number of days the backup will be retained.""" - - weekday: Optional[Literal["SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"]] = None - """The day of the week on which the backup will occur.""" - - window_length_hours: Optional[int] = None - """The length of the backup window starting from `hour`.""" diff --git a/src/gradientai/types/droplet_backup_policy_param.py b/src/gradientai/types/droplet_backup_policy_param.py deleted file mode 100644 index 802f057f..00000000 --- a/src/gradientai/types/droplet_backup_policy_param.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["DropletBackupPolicyParam"] - - -class DropletBackupPolicyParam(TypedDict, total=False): - hour: Literal[0, 4, 8, 12, 16, 20] - """The hour of the day that the backup window will start.""" - - plan: Literal["daily", "weekly"] - """The backup plan used for the Droplet. - - The plan can be either `daily` or `weekly`. - """ - - weekday: Literal["SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"] - """The day of the week on which the backup will occur.""" diff --git a/src/gradientai/types/droplet_create_params.py b/src/gradientai/types/droplet_create_params.py deleted file mode 100644 index 750d7c11..00000000 --- a/src/gradientai/types/droplet_create_params.py +++ /dev/null @@ -1,213 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union, Optional -from typing_extensions import Required, TypeAlias, TypedDict - -from .droplet_backup_policy_param import DropletBackupPolicyParam - -__all__ = ["DropletCreateParams", "DropletSingleCreate", "DropletMultiCreate"] - - -class DropletSingleCreate(TypedDict, total=False): - image: Required[Union[str, int]] - """ - The image ID of a public or private image or the slug identifier for a public - image. This image will be the base image for your Droplet. Requires `image:read` - scope. - """ - - name: Required[str] - """The human-readable string you wish to use when displaying the Droplet name. - - The name, if set to a domain name managed in the DigitalOcean DNS management - system, will configure a PTR record for the Droplet. The name set during - creation will also determine the hostname for the Droplet in its internal - configuration. 
- """ - - size: Required[str] - """The slug identifier for the size that you wish to select for this Droplet.""" - - backup_policy: DropletBackupPolicyParam - """An object specifying the backup policy for the Droplet. - - If omitted and `backups` is `true`, the backup plan will default to daily. - """ - - backups: bool - """ - A boolean indicating whether automated backups should be enabled for the - Droplet. - """ - - ipv6: bool - """A boolean indicating whether to enable IPv6 on the Droplet.""" - - monitoring: bool - """A boolean indicating whether to install the DigitalOcean agent for monitoring.""" - - private_networking: bool - """This parameter has been deprecated. - - Use `vpc_uuid` instead to specify a VPC network for the Droplet. If no - `vpc_uuid` is provided, the Droplet will be placed in your account's default VPC - for the region. - """ - - region: str - """The slug identifier for the region that you wish to deploy the Droplet in. - - If the specific datacenter is not not important, a slug prefix (e.g. `nyc`) can - be used to deploy the Droplet in any of the that region's locations (`nyc1`, - `nyc2`, or `nyc3`). If the region is omitted from the create request completely, - the Droplet may deploy in any region. - """ - - ssh_keys: List[Union[str, int]] - """ - An array containing the IDs or fingerprints of the SSH keys that you wish to - embed in the Droplet's root account upon creation. You must add the keys to your - team before they can be embedded on a Droplet. Requires `ssh_key:read` scope. - """ - - tags: Optional[List[str]] - """A flat array of tag names as strings to apply to the Droplet after it is - created. - - Tag names can either be existing or new tags. Requires `tag:create` scope. - """ - - user_data: str - """ - A string containing 'user data' which may be used to configure the Droplet on - first boot, often a 'cloud-config' file or Bash script. It must be plain text - and may not exceed 64 KiB in size. 
- """ - - volumes: List[str] - """ - An array of IDs for block storage volumes that will be attached to the Droplet - once created. The volumes must not already be attached to an existing Droplet. - Requires `block_storage:read` scpoe. - """ - - vpc_uuid: str - """A string specifying the UUID of the VPC to which the Droplet will be assigned. - - If excluded, the Droplet will be assigned to your account's default VPC for the - region. Requires `vpc:read` scope. - """ - - with_droplet_agent: bool - """ - A boolean indicating whether to install the DigitalOcean agent used for - providing access to the Droplet web console in the control panel. By default, - the agent is installed on new Droplets but installation errors (i.e. OS not - supported) are ignored. To prevent it from being installed, set to `false`. To - make installation errors fatal, explicitly set it to `true`. - """ - - -class DropletMultiCreate(TypedDict, total=False): - image: Required[Union[str, int]] - """ - The image ID of a public or private image or the slug identifier for a public - image. This image will be the base image for your Droplet. Requires `image:read` - scope. - """ - - names: Required[List[str]] - """ - An array of human human-readable strings you wish to use when displaying the - Droplet name. Each name, if set to a domain name managed in the DigitalOcean DNS - management system, will configure a PTR record for the Droplet. Each name set - during creation will also determine the hostname for the Droplet in its internal - configuration. - """ - - size: Required[str] - """The slug identifier for the size that you wish to select for this Droplet.""" - - backup_policy: DropletBackupPolicyParam - """An object specifying the backup policy for the Droplet. - - If omitted and `backups` is `true`, the backup plan will default to daily. - """ - - backups: bool - """ - A boolean indicating whether automated backups should be enabled for the - Droplet. 
- """ - - ipv6: bool - """A boolean indicating whether to enable IPv6 on the Droplet.""" - - monitoring: bool - """A boolean indicating whether to install the DigitalOcean agent for monitoring.""" - - private_networking: bool - """This parameter has been deprecated. - - Use `vpc_uuid` instead to specify a VPC network for the Droplet. If no - `vpc_uuid` is provided, the Droplet will be placed in your account's default VPC - for the region. - """ - - region: str - """The slug identifier for the region that you wish to deploy the Droplet in. - - If the specific datacenter is not not important, a slug prefix (e.g. `nyc`) can - be used to deploy the Droplet in any of the that region's locations (`nyc1`, - `nyc2`, or `nyc3`). If the region is omitted from the create request completely, - the Droplet may deploy in any region. - """ - - ssh_keys: List[Union[str, int]] - """ - An array containing the IDs or fingerprints of the SSH keys that you wish to - embed in the Droplet's root account upon creation. You must add the keys to your - team before they can be embedded on a Droplet. Requires `ssh_key:read` scope. - """ - - tags: Optional[List[str]] - """A flat array of tag names as strings to apply to the Droplet after it is - created. - - Tag names can either be existing or new tags. Requires `tag:create` scope. - """ - - user_data: str - """ - A string containing 'user data' which may be used to configure the Droplet on - first boot, often a 'cloud-config' file or Bash script. It must be plain text - and may not exceed 64 KiB in size. - """ - - volumes: List[str] - """ - An array of IDs for block storage volumes that will be attached to the Droplet - once created. The volumes must not already be attached to an existing Droplet. - Requires `block_storage:read` scpoe. - """ - - vpc_uuid: str - """A string specifying the UUID of the VPC to which the Droplet will be assigned. - - If excluded, the Droplet will be assigned to your account's default VPC for the - region. 
Requires `vpc:read` scope. - """ - - with_droplet_agent: bool - """ - A boolean indicating whether to install the DigitalOcean agent used for - providing access to the Droplet web console in the control panel. By default, - the agent is installed on new Droplets but installation errors (i.e. OS not - supported) are ignored. To prevent it from being installed, set to `false`. To - make installation errors fatal, explicitly set it to `true`. - """ - - -DropletCreateParams: TypeAlias = Union[DropletSingleCreate, DropletMultiCreate] diff --git a/src/gradientai/types/droplet_create_response.py b/src/gradientai/types/droplet_create_response.py deleted file mode 100644 index f69eb592..00000000 --- a/src/gradientai/types/droplet_create_response.py +++ /dev/null @@ -1,39 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Union, Optional -from typing_extensions import TypeAlias - -from .._models import BaseModel -from .shared.droplet import Droplet -from .shared.action_link import ActionLink - -__all__ = [ - "DropletCreateResponse", - "SingleDropletResponse", - "SingleDropletResponseLinks", - "MultipleDropletResponse", - "MultipleDropletResponseLinks", -] - - -class SingleDropletResponseLinks(BaseModel): - actions: Optional[List[ActionLink]] = None - - -class SingleDropletResponse(BaseModel): - droplet: Droplet - - links: SingleDropletResponseLinks - - -class MultipleDropletResponseLinks(BaseModel): - actions: Optional[List[ActionLink]] = None - - -class MultipleDropletResponse(BaseModel): - droplets: List[Droplet] - - links: MultipleDropletResponseLinks - - -DropletCreateResponse: TypeAlias = Union[SingleDropletResponse, MultipleDropletResponse] diff --git a/src/gradientai/types/droplet_delete_by_tag_params.py b/src/gradientai/types/droplet_delete_by_tag_params.py deleted file mode 100644 index 820b0db6..00000000 --- a/src/gradientai/types/droplet_delete_by_tag_params.py +++ /dev/null @@ -1,12 +0,0 @@ 
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -__all__ = ["DropletDeleteByTagParams"] - - -class DropletDeleteByTagParams(TypedDict, total=False): - tag_name: Required[str] - """Specifies Droplets to be deleted by tag.""" diff --git a/src/gradientai/types/droplet_list_firewalls_params.py b/src/gradientai/types/droplet_list_firewalls_params.py deleted file mode 100644 index 86774e77..00000000 --- a/src/gradientai/types/droplet_list_firewalls_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["DropletListFirewallsParams"] - - -class DropletListFirewallsParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" diff --git a/src/gradientai/types/droplet_list_firewalls_response.py b/src/gradientai/types/droplet_list_firewalls_response.py deleted file mode 100644 index 5aa00655..00000000 --- a/src/gradientai/types/droplet_list_firewalls_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from .._models import BaseModel -from .firewall import Firewall -from .shared.page_links import PageLinks -from .shared.meta_properties import MetaProperties - -__all__ = ["DropletListFirewallsResponse"] - - -class DropletListFirewallsResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - firewalls: Optional[List[Firewall]] = None - - links: Optional[PageLinks] = None diff --git a/src/gradientai/types/droplet_list_kernels_params.py b/src/gradientai/types/droplet_list_kernels_params.py deleted file mode 100644 index 8fdfe6e1..00000000 --- a/src/gradientai/types/droplet_list_kernels_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["DropletListKernelsParams"] - - -class DropletListKernelsParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" diff --git a/src/gradientai/types/droplet_list_kernels_response.py b/src/gradientai/types/droplet_list_kernels_response.py deleted file mode 100644 index 3352e1f6..00000000 --- a/src/gradientai/types/droplet_list_kernels_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from .._models import BaseModel -from .shared.kernel import Kernel -from .shared.page_links import PageLinks -from .shared.meta_properties import MetaProperties - -__all__ = ["DropletListKernelsResponse"] - - -class DropletListKernelsResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - kernels: Optional[List[Optional[Kernel]]] = None - - links: Optional[PageLinks] = None diff --git a/src/gradientai/types/droplet_list_neighbors_response.py b/src/gradientai/types/droplet_list_neighbors_response.py deleted file mode 100644 index 2f9a84fc..00000000 --- a/src/gradientai/types/droplet_list_neighbors_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from .._models import BaseModel -from .shared.droplet import Droplet - -__all__ = ["DropletListNeighborsResponse"] - - -class DropletListNeighborsResponse(BaseModel): - droplets: Optional[List[Droplet]] = None diff --git a/src/gradientai/types/droplet_list_params.py b/src/gradientai/types/droplet_list_params.py deleted file mode 100644 index d0fd62bc..00000000 --- a/src/gradientai/types/droplet_list_params.py +++ /dev/null @@ -1,34 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["DropletListParams"] - - -class DropletListParams(TypedDict, total=False): - name: str - """Used to filter list response by Droplet name returning only exact matches. - - It is case-insensitive and can not be combined with `tag_name`. - """ - - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" - - tag_name: str - """Used to filter Droplets by a specific tag. - - Can not be combined with `name` or `type`. Requires `tag:read` scope. 
- """ - - type: Literal["droplets", "gpus"] - """When `type` is set to `gpus`, only GPU Droplets will be returned. - - By default, only non-GPU Droplets are returned. Can not be combined with - `tag_name`. - """ diff --git a/src/gradientai/types/droplet_list_response.py b/src/gradientai/types/droplet_list_response.py deleted file mode 100644 index 20dce5d7..00000000 --- a/src/gradientai/types/droplet_list_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from .._models import BaseModel -from .shared.droplet import Droplet -from .shared.page_links import PageLinks -from .shared.meta_properties import MetaProperties - -__all__ = ["DropletListResponse"] - - -class DropletListResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - droplets: Optional[List[Droplet]] = None - - links: Optional[PageLinks] = None diff --git a/src/gradientai/types/droplet_list_snapshots_params.py b/src/gradientai/types/droplet_list_snapshots_params.py deleted file mode 100644 index 9d05be15..00000000 --- a/src/gradientai/types/droplet_list_snapshots_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["DropletListSnapshotsParams"] - - -class DropletListSnapshotsParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" diff --git a/src/gradientai/types/droplet_list_snapshots_response.py b/src/gradientai/types/droplet_list_snapshots_response.py deleted file mode 100644 index ea6c9296..00000000 --- a/src/gradientai/types/droplet_list_snapshots_response.py +++ /dev/null @@ -1,53 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from .._models import BaseModel -from .shared.page_links import PageLinks -from .shared.meta_properties import MetaProperties - -__all__ = ["DropletListSnapshotsResponse", "Snapshot"] - - -class Snapshot(BaseModel): - id: int - """The unique identifier for the snapshot or backup.""" - - created_at: datetime - """ - A time value given in ISO8601 combined date and time format that represents when - the snapshot was created. - """ - - min_disk_size: int - """The minimum size in GB required for a volume or Droplet to use this snapshot.""" - - name: str - """A human-readable name for the snapshot.""" - - regions: List[str] - """An array of the regions that the snapshot is available in. - - The regions are represented by their identifying slug values. - """ - - size_gigabytes: float - """The billable size of the snapshot in gigabytes.""" - - type: Literal["snapshot", "backup"] - """Describes the kind of image. - - It may be one of `snapshot` or `backup`. This specifies whether an image is a - user-generated Droplet snapshot or automatically created Droplet backup. - """ - - -class DropletListSnapshotsResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - links: Optional[PageLinks] = None - - snapshots: Optional[List[Snapshot]] = None diff --git a/src/gradientai/types/droplet_retrieve_response.py b/src/gradientai/types/droplet_retrieve_response.py deleted file mode 100644 index a3e60721..00000000 --- a/src/gradientai/types/droplet_retrieve_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from .._models import BaseModel -from .shared.droplet import Droplet - -__all__ = ["DropletRetrieveResponse"] - - -class DropletRetrieveResponse(BaseModel): - droplet: Optional[Droplet] = None diff --git a/src/gradientai/types/droplets/__init__.py b/src/gradientai/types/droplets/__init__.py deleted file mode 100644 index 4313caa9..00000000 --- a/src/gradientai/types/droplets/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .autoscale_pool import AutoscalePool as AutoscalePool -from .action_list_params import ActionListParams as ActionListParams -from .backup_list_params import BackupListParams as BackupListParams -from .associated_resource import AssociatedResource as AssociatedResource -from .current_utilization import CurrentUtilization as CurrentUtilization -from .action_list_response import ActionListResponse as ActionListResponse -from .backup_list_response import BackupListResponse as BackupListResponse -from .autoscale_list_params import AutoscaleListParams as AutoscaleListParams -from .action_initiate_params import ActionInitiateParams as ActionInitiateParams -from .autoscale_create_params import AutoscaleCreateParams as AutoscaleCreateParams -from .autoscale_list_response import AutoscaleListResponse as AutoscaleListResponse -from .autoscale_update_params import AutoscaleUpdateParams as AutoscaleUpdateParams -from .action_initiate_response import ActionInitiateResponse as ActionInitiateResponse -from .action_retrieve_response import ActionRetrieveResponse as ActionRetrieveResponse -from .autoscale_create_response import AutoscaleCreateResponse as AutoscaleCreateResponse -from .autoscale_update_response import AutoscaleUpdateResponse as AutoscaleUpdateResponse -from .action_bulk_initiate_params import ActionBulkInitiateParams as ActionBulkInitiateParams -from .autoscale_retrieve_response import 
AutoscaleRetrieveResponse as AutoscaleRetrieveResponse -from .backup_list_policies_params import BackupListPoliciesParams as BackupListPoliciesParams -from .autoscale_pool_static_config import AutoscalePoolStaticConfig as AutoscalePoolStaticConfig -from .action_bulk_initiate_response import ActionBulkInitiateResponse as ActionBulkInitiateResponse -from .autoscale_list_history_params import AutoscaleListHistoryParams as AutoscaleListHistoryParams -from .autoscale_list_members_params import AutoscaleListMembersParams as AutoscaleListMembersParams -from .autoscale_pool_dynamic_config import AutoscalePoolDynamicConfig as AutoscalePoolDynamicConfig -from .backup_list_policies_response import BackupListPoliciesResponse as BackupListPoliciesResponse -from .destroyed_associated_resource import DestroyedAssociatedResource as DestroyedAssociatedResource -from .autoscale_list_history_response import AutoscaleListHistoryResponse as AutoscaleListHistoryResponse -from .autoscale_list_members_response import AutoscaleListMembersResponse as AutoscaleListMembersResponse -from .autoscale_pool_droplet_template import AutoscalePoolDropletTemplate as AutoscalePoolDropletTemplate -from .backup_retrieve_policy_response import BackupRetrievePolicyResponse as BackupRetrievePolicyResponse -from .autoscale_pool_static_config_param import AutoscalePoolStaticConfigParam as AutoscalePoolStaticConfigParam -from .autoscale_pool_dynamic_config_param import AutoscalePoolDynamicConfigParam as AutoscalePoolDynamicConfigParam -from .autoscale_pool_droplet_template_param import ( - AutoscalePoolDropletTemplateParam as AutoscalePoolDropletTemplateParam, -) -from .backup_list_supported_policies_response import ( - BackupListSupportedPoliciesResponse as BackupListSupportedPoliciesResponse, -) -from .destroy_with_associated_resource_list_response import ( - DestroyWithAssociatedResourceListResponse as DestroyWithAssociatedResourceListResponse, -) -from 
.destroy_with_associated_resource_check_status_response import ( - DestroyWithAssociatedResourceCheckStatusResponse as DestroyWithAssociatedResourceCheckStatusResponse, -) -from .destroy_with_associated_resource_delete_selective_params import ( - DestroyWithAssociatedResourceDeleteSelectiveParams as DestroyWithAssociatedResourceDeleteSelectiveParams, -) diff --git a/src/gradientai/types/droplets/action_bulk_initiate_params.py b/src/gradientai/types/droplets/action_bulk_initiate_params.py deleted file mode 100644 index a6402096..00000000 --- a/src/gradientai/types/droplets/action_bulk_initiate_params.py +++ /dev/null @@ -1,72 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -__all__ = ["ActionBulkInitiateParams", "DropletAction", "DropletActionSnapshot"] - - -class DropletAction(TypedDict, total=False): - type: Required[ - Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ] - ] - """The type of action to initiate for the Droplet.""" - - tag_name: str - """Used to filter Droplets by a specific tag. - - Can not be combined with `name` or `type`. Requires `tag:read` scope. - """ - - -class DropletActionSnapshot(TypedDict, total=False): - type: Required[ - Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ] - ] - """The type of action to initiate for the Droplet.""" - - tag_name: str - """Used to filter Droplets by a specific tag. - - Can not be combined with `name` or `type`. Requires `tag:read` scope. 
- """ - - name: str - """The name to give the new snapshot of the Droplet.""" - - -ActionBulkInitiateParams: TypeAlias = Union[DropletAction, DropletActionSnapshot] diff --git a/src/gradientai/types/droplets/action_bulk_initiate_response.py b/src/gradientai/types/droplets/action_bulk_initiate_response.py deleted file mode 100644 index 905860d7..00000000 --- a/src/gradientai/types/droplets/action_bulk_initiate_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from ..._models import BaseModel -from ..shared.action import Action - -__all__ = ["ActionBulkInitiateResponse"] - - -class ActionBulkInitiateResponse(BaseModel): - actions: Optional[List[Action]] = None diff --git a/src/gradientai/types/droplets/action_initiate_params.py b/src/gradientai/types/droplets/action_initiate_params.py deleted file mode 100644 index f0ef6b1e..00000000 --- a/src/gradientai/types/droplets/action_initiate_params.py +++ /dev/null @@ -1,278 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from ..droplet_backup_policy_param import DropletBackupPolicyParam - -__all__ = [ - "ActionInitiateParams", - "DropletAction", - "DropletActionEnableBackups", - "DropletActionChangeBackupPolicy", - "DropletActionRestore", - "DropletActionResize", - "DropletActionRebuild", - "DropletActionRename", - "DropletActionChangeKernel", - "DropletActionSnapshot", -] - - -class DropletAction(TypedDict, total=False): - type: Required[ - Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ] - ] - """The type of action to initiate for the Droplet.""" - - -class DropletActionEnableBackups(TypedDict, total=False): - type: Required[ - Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ] - ] - """The type of action to initiate for the Droplet.""" - - backup_policy: DropletBackupPolicyParam - """An object specifying the backup policy for the Droplet. - - If omitted, the backup plan will default to daily. 
- """ - - -class DropletActionChangeBackupPolicy(TypedDict, total=False): - type: Required[ - Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ] - ] - """The type of action to initiate for the Droplet.""" - - backup_policy: DropletBackupPolicyParam - """An object specifying the backup policy for the Droplet.""" - - -class DropletActionRestore(TypedDict, total=False): - type: Required[ - Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ] - ] - """The type of action to initiate for the Droplet.""" - - image: int - """The ID of a backup of the current Droplet instance to restore from.""" - - -class DropletActionResize(TypedDict, total=False): - type: Required[ - Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ] - ] - """The type of action to initiate for the Droplet.""" - - disk: bool - """When `true`, the Droplet's disk will be resized in addition to its RAM and CPU. - - This is a permanent change and cannot be reversed as a Droplet's disk size - cannot be decreased. 
- """ - - size: str - """The slug identifier for the size to which you wish to resize the Droplet.""" - - -class DropletActionRebuild(TypedDict, total=False): - type: Required[ - Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ] - ] - """The type of action to initiate for the Droplet.""" - - image: Union[str, int] - """ - The image ID of a public or private image or the slug identifier for a public - image. The Droplet will be rebuilt using this image as its base. - """ - - -class DropletActionRename(TypedDict, total=False): - type: Required[ - Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ] - ] - """The type of action to initiate for the Droplet.""" - - name: str - """The new name for the Droplet.""" - - -class DropletActionChangeKernel(TypedDict, total=False): - type: Required[ - Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ] - ] - """The type of action to initiate for the Droplet.""" - - kernel: int - """A unique number used to identify and reference a specific kernel.""" - - -class DropletActionSnapshot(TypedDict, total=False): - type: Required[ - Literal[ - "enable_backups", - "disable_backups", - "reboot", - "power_cycle", - "shutdown", - "power_off", - "power_on", - "restore", - "password_reset", - "resize", - "rebuild", - "rename", - "change_kernel", - "enable_ipv6", - "snapshot", - ] - ] - """The type of action to initiate for the Droplet.""" - - name: str - """The name to 
give the new snapshot of the Droplet.""" - - -ActionInitiateParams: TypeAlias = Union[ - DropletAction, - DropletActionEnableBackups, - DropletActionChangeBackupPolicy, - DropletActionRestore, - DropletActionResize, - DropletActionRebuild, - DropletActionRename, - DropletActionChangeKernel, - DropletActionSnapshot, -] diff --git a/src/gradientai/types/droplets/action_initiate_response.py b/src/gradientai/types/droplets/action_initiate_response.py deleted file mode 100644 index 087781d1..00000000 --- a/src/gradientai/types/droplets/action_initiate_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from ..shared.action import Action - -__all__ = ["ActionInitiateResponse"] - - -class ActionInitiateResponse(BaseModel): - action: Optional[Action] = None diff --git a/src/gradientai/types/droplets/action_list_params.py b/src/gradientai/types/droplets/action_list_params.py deleted file mode 100644 index dd873288..00000000 --- a/src/gradientai/types/droplets/action_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["ActionListParams"] - - -class ActionListParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" diff --git a/src/gradientai/types/droplets/action_list_response.py b/src/gradientai/types/droplets/action_list_response.py deleted file mode 100644 index 1a20f780..00000000 --- a/src/gradientai/types/droplets/action_list_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from ..._models import BaseModel -from ..shared.action import Action -from ..shared.page_links import PageLinks -from ..shared.meta_properties import MetaProperties - -__all__ = ["ActionListResponse"] - - -class ActionListResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - actions: Optional[List[Action]] = None - - links: Optional[PageLinks] = None diff --git a/src/gradientai/types/droplets/action_retrieve_response.py b/src/gradientai/types/droplets/action_retrieve_response.py deleted file mode 100644 index 3856228d..00000000 --- a/src/gradientai/types/droplets/action_retrieve_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from ..shared.action import Action - -__all__ = ["ActionRetrieveResponse"] - - -class ActionRetrieveResponse(BaseModel): - action: Optional[Action] = None diff --git a/src/gradientai/types/droplets/associated_resource.py b/src/gradientai/types/droplets/associated_resource.py deleted file mode 100644 index f72c3d32..00000000 --- a/src/gradientai/types/droplets/associated_resource.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["AssociatedResource"] - - -class AssociatedResource(BaseModel): - id: Optional[str] = None - """The unique identifier for the resource associated with the Droplet.""" - - cost: Optional[str] = None - """ - The cost of the resource in USD per month if the resource is retained after the - Droplet is destroyed. 
- """ - - name: Optional[str] = None - """The name of the resource associated with the Droplet.""" diff --git a/src/gradientai/types/droplets/autoscale_create_params.py b/src/gradientai/types/droplets/autoscale_create_params.py deleted file mode 100644 index 0f3c05a6..00000000 --- a/src/gradientai/types/droplets/autoscale_create_params.py +++ /dev/null @@ -1,28 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import Required, TypeAlias, TypedDict - -from .autoscale_pool_static_config_param import AutoscalePoolStaticConfigParam -from .autoscale_pool_dynamic_config_param import AutoscalePoolDynamicConfigParam -from .autoscale_pool_droplet_template_param import AutoscalePoolDropletTemplateParam - -__all__ = ["AutoscaleCreateParams", "Config"] - - -class AutoscaleCreateParams(TypedDict, total=False): - config: Required[Config] - """ - The scaling configuration for an autoscale pool, which is how the pool scales up - and down (either by resource utilization or static configuration). - """ - - droplet_template: Required[AutoscalePoolDropletTemplateParam] - - name: Required[str] - """The human-readable name of the autoscale pool. This field cannot be updated""" - - -Config: TypeAlias = Union[AutoscalePoolStaticConfigParam, AutoscalePoolDynamicConfigParam] diff --git a/src/gradientai/types/droplets/autoscale_create_response.py b/src/gradientai/types/droplets/autoscale_create_response.py deleted file mode 100644 index 819297e9..00000000 --- a/src/gradientai/types/droplets/autoscale_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..._models import BaseModel -from .autoscale_pool import AutoscalePool - -__all__ = ["AutoscaleCreateResponse"] - - -class AutoscaleCreateResponse(BaseModel): - autoscale_pool: Optional[AutoscalePool] = None diff --git a/src/gradientai/types/droplets/autoscale_list_history_params.py b/src/gradientai/types/droplets/autoscale_list_history_params.py deleted file mode 100644 index f837a11e..00000000 --- a/src/gradientai/types/droplets/autoscale_list_history_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["AutoscaleListHistoryParams"] - - -class AutoscaleListHistoryParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" diff --git a/src/gradientai/types/droplets/autoscale_list_history_response.py b/src/gradientai/types/droplets/autoscale_list_history_response.py deleted file mode 100644 index 843f44d8..00000000 --- a/src/gradientai/types/droplets/autoscale_list_history_response.py +++ /dev/null @@ -1,48 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from ..._models import BaseModel -from ..shared.page_links import PageLinks -from ..shared.meta_properties import MetaProperties - -__all__ = ["AutoscaleListHistoryResponse", "History"] - - -class History(BaseModel): - created_at: datetime - """ - The creation time of the history event in ISO8601 combined date and time format. 
- """ - - current_instance_count: int - """The current number of Droplets in the autoscale pool.""" - - desired_instance_count: int - """The target number of Droplets for the autoscale pool after the scaling event.""" - - history_event_id: str - """The unique identifier of the history event.""" - - reason: Literal["CONFIGURATION_CHANGE", "SCALE_UP", "SCALE_DOWN"] - """The reason for the scaling event.""" - - status: Literal["in_progress", "success", "error"] - """The status of the scaling event.""" - - updated_at: datetime - """ - The last updated time of the history event in ISO8601 combined date and time - format. - """ - - -class AutoscaleListHistoryResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - history: Optional[List[History]] = None - - links: Optional[PageLinks] = None diff --git a/src/gradientai/types/droplets/autoscale_list_members_params.py b/src/gradientai/types/droplets/autoscale_list_members_params.py deleted file mode 100644 index 5a7f738d..00000000 --- a/src/gradientai/types/droplets/autoscale_list_members_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["AutoscaleListMembersParams"] - - -class AutoscaleListMembersParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" diff --git a/src/gradientai/types/droplets/autoscale_list_members_response.py b/src/gradientai/types/droplets/autoscale_list_members_response.py deleted file mode 100644 index 337ac4e3..00000000 --- a/src/gradientai/types/droplets/autoscale_list_members_response.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from ..._models import BaseModel -from ..shared.page_links import PageLinks -from ..shared.meta_properties import MetaProperties - -__all__ = ["AutoscaleListMembersResponse", "Droplet", "DropletCurrentUtilization"] - - -class DropletCurrentUtilization(BaseModel): - cpu: Optional[float] = None - """The CPU utilization average of the individual Droplet.""" - - memory: Optional[float] = None - """The memory utilization average of the individual Droplet.""" - - -class Droplet(BaseModel): - created_at: datetime - """The creation time of the Droplet in ISO8601 combined date and time format.""" - - current_utilization: DropletCurrentUtilization - - droplet_id: int - """The unique identifier of the Droplet.""" - - health_status: str - """The health status of the Droplet.""" - - status: Literal["provisioning", "active", "deleting", "off"] - """The power status of the Droplet.""" - - updated_at: datetime - """The last updated time of the Droplet in ISO8601 combined date and time format.""" - - -class AutoscaleListMembersResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - droplets: Optional[List[Droplet]] = None - - links: Optional[PageLinks] = None diff --git a/src/gradientai/types/droplets/autoscale_list_params.py b/src/gradientai/types/droplets/autoscale_list_params.py deleted file mode 100644 index 3a35e616..00000000 --- a/src/gradientai/types/droplets/autoscale_list_params.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["AutoscaleListParams"] - - -class AutoscaleListParams(TypedDict, total=False): - name: str - """The name of the autoscale pool""" - - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" diff --git a/src/gradientai/types/droplets/autoscale_list_response.py b/src/gradientai/types/droplets/autoscale_list_response.py deleted file mode 100644 index 807cb17f..00000000 --- a/src/gradientai/types/droplets/autoscale_list_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from ..._models import BaseModel -from .autoscale_pool import AutoscalePool -from ..shared.page_links import PageLinks -from ..shared.meta_properties import MetaProperties - -__all__ = ["AutoscaleListResponse"] - - -class AutoscaleListResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - autoscale_pools: Optional[List[AutoscalePool]] = None - - links: Optional[PageLinks] = None diff --git a/src/gradientai/types/droplets/autoscale_pool.py b/src/gradientai/types/droplets/autoscale_pool.py deleted file mode 100644 index 2964319e..00000000 --- a/src/gradientai/types/droplets/autoscale_pool.py +++ /dev/null @@ -1,54 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Union, Optional -from datetime import datetime -from typing_extensions import Literal, TypeAlias - -from ..._models import BaseModel -from .current_utilization import CurrentUtilization -from .autoscale_pool_static_config import AutoscalePoolStaticConfig -from .autoscale_pool_dynamic_config import AutoscalePoolDynamicConfig -from .autoscale_pool_droplet_template import AutoscalePoolDropletTemplate - -__all__ = ["AutoscalePool", "Config"] - -Config: TypeAlias = Union[AutoscalePoolStaticConfig, AutoscalePoolDynamicConfig] - - -class AutoscalePool(BaseModel): - id: str - """A unique identifier for each autoscale pool instance. - - This is automatically generated upon autoscale pool creation. - """ - - active_resources_count: int - """The number of active Droplets in the autoscale pool.""" - - config: Config - """ - The scaling configuration for an autoscale pool, which is how the pool scales up - and down (either by resource utilization or static configuration). - """ - - created_at: datetime - """ - A time value given in ISO8601 combined date and time format that represents when - the autoscale pool was created. - """ - - droplet_template: AutoscalePoolDropletTemplate - - name: str - """The human-readable name set for the autoscale pool.""" - - status: Literal["active", "deleting", "error"] - """The current status of the autoscale pool.""" - - updated_at: datetime - """ - A time value given in ISO8601 combined date and time format that represents when - the autoscale pool was last updated. - """ - - current_utilization: Optional[CurrentUtilization] = None diff --git a/src/gradientai/types/droplets/autoscale_pool_droplet_template.py b/src/gradientai/types/droplets/autoscale_pool_droplet_template.py deleted file mode 100644 index 2ab2036b..00000000 --- a/src/gradientai/types/droplets/autoscale_pool_droplet_template.py +++ /dev/null @@ -1,69 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["AutoscalePoolDropletTemplate"] - - -class AutoscalePoolDropletTemplate(BaseModel): - image: str - """The Droplet image to be used for all Droplets in the autoscale pool. - - You may specify the slug or the image ID. - """ - - region: Literal[ - "nyc1", "nyc2", "nyc3", "ams2", "ams3", "sfo1", "sfo2", "sfo3", "sgp1", "lon1", "fra1", "tor1", "blr1", "syd1" - ] - """The datacenter in which all of the Droplets will be created.""" - - size: str - """The Droplet size to be used for all Droplets in the autoscale pool.""" - - ssh_keys: List[str] - """The SSH keys to be installed on the Droplets in the autoscale pool. - - You can either specify the key ID or the fingerprint. Requires `ssh_key:read` - scope. - """ - - ipv6: Optional[bool] = None - """Assigns a unique IPv6 address to each of the Droplets in the autoscale pool.""" - - name: Optional[str] = None - """The name(s) to be applied to all Droplets in the autoscale pool.""" - - project_id: Optional[str] = None - """ - The project that the Droplets in the autoscale pool will belong to. Requires - `project:read` scope. - """ - - tags: Optional[List[str]] = None - """ - The tags to apply to each of the Droplets in the autoscale pool. Requires - `tag:read` scope. - """ - - user_data: Optional[str] = None - """ - A string containing user data that cloud-init consumes to configure a Droplet on - first boot. User data is often a cloud-config file or Bash script. It must be - plain text and may not exceed 64 KiB in size. - """ - - vpc_uuid: Optional[str] = None - """The VPC where the Droplets in the autoscale pool will be created. - - The VPC must be in the region where you want to create the Droplets. Requires - `vpc:read` scope. - """ - - with_droplet_agent: Optional[bool] = None - """Installs the Droplet agent. - - This must be set to true to monitor Droplets for resource utilization scaling. 
- """ diff --git a/src/gradientai/types/droplets/autoscale_pool_droplet_template_param.py b/src/gradientai/types/droplets/autoscale_pool_droplet_template_param.py deleted file mode 100644 index c491ed55..00000000 --- a/src/gradientai/types/droplets/autoscale_pool_droplet_template_param.py +++ /dev/null @@ -1,84 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["AutoscalePoolDropletTemplateParam"] - - -class AutoscalePoolDropletTemplateParam(TypedDict, total=False): - image: Required[str] - """The Droplet image to be used for all Droplets in the autoscale pool. - - You may specify the slug or the image ID. - """ - - region: Required[ - Literal[ - "nyc1", - "nyc2", - "nyc3", - "ams2", - "ams3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "lon1", - "fra1", - "tor1", - "blr1", - "syd1", - ] - ] - """The datacenter in which all of the Droplets will be created.""" - - size: Required[str] - """The Droplet size to be used for all Droplets in the autoscale pool.""" - - ssh_keys: Required[List[str]] - """The SSH keys to be installed on the Droplets in the autoscale pool. - - You can either specify the key ID or the fingerprint. Requires `ssh_key:read` - scope. - """ - - ipv6: bool - """Assigns a unique IPv6 address to each of the Droplets in the autoscale pool.""" - - name: str - """The name(s) to be applied to all Droplets in the autoscale pool.""" - - project_id: str - """ - The project that the Droplets in the autoscale pool will belong to. Requires - `project:read` scope. - """ - - tags: List[str] - """ - The tags to apply to each of the Droplets in the autoscale pool. Requires - `tag:read` scope. - """ - - user_data: str - """ - A string containing user data that cloud-init consumes to configure a Droplet on - first boot. User data is often a cloud-config file or Bash script. 
It must be - plain text and may not exceed 64 KiB in size. - """ - - vpc_uuid: str - """The VPC where the Droplets in the autoscale pool will be created. - - The VPC must be in the region where you want to create the Droplets. Requires - `vpc:read` scope. - """ - - with_droplet_agent: bool - """Installs the Droplet agent. - - This must be set to true to monitor Droplets for resource utilization scaling. - """ diff --git a/src/gradientai/types/droplets/autoscale_pool_dynamic_config.py b/src/gradientai/types/droplets/autoscale_pool_dynamic_config.py deleted file mode 100644 index 10f9781b..00000000 --- a/src/gradientai/types/droplets/autoscale_pool_dynamic_config.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["AutoscalePoolDynamicConfig"] - - -class AutoscalePoolDynamicConfig(BaseModel): - max_instances: int - """The maximum number of Droplets in an autoscale pool.""" - - min_instances: int - """The minimum number of Droplets in an autoscale pool.""" - - cooldown_minutes: Optional[int] = None - """The number of minutes to wait between scaling events in an autoscale pool. - - Defaults to 10 minutes. - """ - - target_cpu_utilization: Optional[float] = None - """Target CPU utilization as a decimal.""" - - target_memory_utilization: Optional[float] = None - """Target memory utilization as a decimal.""" diff --git a/src/gradientai/types/droplets/autoscale_pool_dynamic_config_param.py b/src/gradientai/types/droplets/autoscale_pool_dynamic_config_param.py deleted file mode 100644 index af06e73a..00000000 --- a/src/gradientai/types/droplets/autoscale_pool_dynamic_config_param.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -__all__ = ["AutoscalePoolDynamicConfigParam"] - - -class AutoscalePoolDynamicConfigParam(TypedDict, total=False): - max_instances: Required[int] - """The maximum number of Droplets in an autoscale pool.""" - - min_instances: Required[int] - """The minimum number of Droplets in an autoscale pool.""" - - cooldown_minutes: int - """The number of minutes to wait between scaling events in an autoscale pool. - - Defaults to 10 minutes. - """ - - target_cpu_utilization: float - """Target CPU utilization as a decimal.""" - - target_memory_utilization: float - """Target memory utilization as a decimal.""" diff --git a/src/gradientai/types/droplets/autoscale_pool_static_config.py b/src/gradientai/types/droplets/autoscale_pool_static_config.py deleted file mode 100644 index cc891007..00000000 --- a/src/gradientai/types/droplets/autoscale_pool_static_config.py +++ /dev/null @@ -1,10 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from ..._models import BaseModel - -__all__ = ["AutoscalePoolStaticConfig"] - - -class AutoscalePoolStaticConfig(BaseModel): - target_number_instances: int - """Fixed number of instances in an autoscale pool.""" diff --git a/src/gradientai/types/droplets/autoscale_pool_static_config_param.py b/src/gradientai/types/droplets/autoscale_pool_static_config_param.py deleted file mode 100644 index a7510d22..00000000 --- a/src/gradientai/types/droplets/autoscale_pool_static_config_param.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -__all__ = ["AutoscalePoolStaticConfigParam"] - - -class AutoscalePoolStaticConfigParam(TypedDict, total=False): - target_number_instances: Required[int] - """Fixed number of instances in an autoscale pool.""" diff --git a/src/gradientai/types/droplets/autoscale_retrieve_response.py b/src/gradientai/types/droplets/autoscale_retrieve_response.py deleted file mode 100644 index f383ed03..00000000 --- a/src/gradientai/types/droplets/autoscale_retrieve_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from .autoscale_pool import AutoscalePool - -__all__ = ["AutoscaleRetrieveResponse"] - - -class AutoscaleRetrieveResponse(BaseModel): - autoscale_pool: Optional[AutoscalePool] = None diff --git a/src/gradientai/types/droplets/autoscale_update_params.py b/src/gradientai/types/droplets/autoscale_update_params.py deleted file mode 100644 index 1b96af1e..00000000 --- a/src/gradientai/types/droplets/autoscale_update_params.py +++ /dev/null @@ -1,28 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import Required, TypeAlias, TypedDict - -from .autoscale_pool_static_config_param import AutoscalePoolStaticConfigParam -from .autoscale_pool_dynamic_config_param import AutoscalePoolDynamicConfigParam -from .autoscale_pool_droplet_template_param import AutoscalePoolDropletTemplateParam - -__all__ = ["AutoscaleUpdateParams", "Config"] - - -class AutoscaleUpdateParams(TypedDict, total=False): - config: Required[Config] - """ - The scaling configuration for an autoscale pool, which is how the pool scales up - and down (either by resource utilization or static configuration). 
- """ - - droplet_template: Required[AutoscalePoolDropletTemplateParam] - - name: Required[str] - """The human-readable name of the autoscale pool. This field cannot be updated""" - - -Config: TypeAlias = Union[AutoscalePoolStaticConfigParam, AutoscalePoolDynamicConfigParam] diff --git a/src/gradientai/types/droplets/autoscale_update_response.py b/src/gradientai/types/droplets/autoscale_update_response.py deleted file mode 100644 index 09dde2a4..00000000 --- a/src/gradientai/types/droplets/autoscale_update_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from .autoscale_pool import AutoscalePool - -__all__ = ["AutoscaleUpdateResponse"] - - -class AutoscaleUpdateResponse(BaseModel): - autoscale_pool: Optional[AutoscalePool] = None diff --git a/src/gradientai/types/droplets/backup_list_params.py b/src/gradientai/types/droplets/backup_list_params.py deleted file mode 100644 index 66fe92aa..00000000 --- a/src/gradientai/types/droplets/backup_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["BackupListParams"] - - -class BackupListParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" diff --git a/src/gradientai/types/droplets/backup_list_policies_params.py b/src/gradientai/types/droplets/backup_list_policies_params.py deleted file mode 100644 index 0cdb0ddb..00000000 --- a/src/gradientai/types/droplets/backup_list_policies_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["BackupListPoliciesParams"] - - -class BackupListPoliciesParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" diff --git a/src/gradientai/types/droplets/backup_list_policies_response.py b/src/gradientai/types/droplets/backup_list_policies_response.py deleted file mode 100644 index 73aa9458..00000000 --- a/src/gradientai/types/droplets/backup_list_policies_response.py +++ /dev/null @@ -1,41 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, Optional - -from ..._models import BaseModel -from ..shared.page_links import PageLinks -from ..droplet_backup_policy import DropletBackupPolicy -from ..shared.meta_properties import MetaProperties -from ..shared.droplet_next_backup_window import DropletNextBackupWindow - -__all__ = ["BackupListPoliciesResponse", "Policies"] - - -class Policies(BaseModel): - backup_enabled: Optional[bool] = None - """A boolean value indicating whether backups are enabled for the Droplet.""" - - backup_policy: Optional[DropletBackupPolicy] = None - """An object specifying the backup policy for the Droplet.""" - - droplet_id: Optional[int] = None - """The unique identifier for the Droplet.""" - - next_backup_window: Optional[DropletNextBackupWindow] = None - """ - An object containing keys with the start and end times of the window during - which the backup will occur. - """ - - -class BackupListPoliciesResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - links: Optional[PageLinks] = None - - policies: Optional[Dict[str, Policies]] = None - """ - A map where the keys are the Droplet IDs and the values are objects containing - the backup policy information for each Droplet. 
- """ diff --git a/src/gradientai/types/droplets/backup_list_response.py b/src/gradientai/types/droplets/backup_list_response.py deleted file mode 100644 index c96d573a..00000000 --- a/src/gradientai/types/droplets/backup_list_response.py +++ /dev/null @@ -1,53 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from ..._models import BaseModel -from ..shared.page_links import PageLinks -from ..shared.meta_properties import MetaProperties - -__all__ = ["BackupListResponse", "Backup"] - - -class Backup(BaseModel): - id: int - """The unique identifier for the snapshot or backup.""" - - created_at: datetime - """ - A time value given in ISO8601 combined date and time format that represents when - the snapshot was created. - """ - - min_disk_size: int - """The minimum size in GB required for a volume or Droplet to use this snapshot.""" - - name: str - """A human-readable name for the snapshot.""" - - regions: List[str] - """An array of the regions that the snapshot is available in. - - The regions are represented by their identifying slug values. - """ - - size_gigabytes: float - """The billable size of the snapshot in gigabytes.""" - - type: Literal["snapshot", "backup"] - """Describes the kind of image. - - It may be one of `snapshot` or `backup`. This specifies whether an image is a - user-generated Droplet snapshot or automatically created Droplet backup. 
- """ - - -class BackupListResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - backups: Optional[List[Backup]] = None - - links: Optional[PageLinks] = None diff --git a/src/gradientai/types/droplets/backup_list_supported_policies_response.py b/src/gradientai/types/droplets/backup_list_supported_policies_response.py deleted file mode 100644 index 219cfc34..00000000 --- a/src/gradientai/types/droplets/backup_list_supported_policies_response.py +++ /dev/null @@ -1,28 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from ..._models import BaseModel - -__all__ = ["BackupListSupportedPoliciesResponse", "SupportedPolicy"] - - -class SupportedPolicy(BaseModel): - name: Optional[str] = None - """The name of the Droplet backup plan.""" - - possible_days: Optional[List[str]] = None - """The day of the week the backup will occur.""" - - possible_window_starts: Optional[List[int]] = None - """An array of integers representing the hours of the day that a backup can start.""" - - retention_period_days: Optional[int] = None - """The number of days that a backup will be kept.""" - - window_length_hours: Optional[int] = None - """The number of hours that a backup window is open.""" - - -class BackupListSupportedPoliciesResponse(BaseModel): - supported_policies: Optional[List[SupportedPolicy]] = None diff --git a/src/gradientai/types/droplets/backup_retrieve_policy_response.py b/src/gradientai/types/droplets/backup_retrieve_policy_response.py deleted file mode 100644 index 38288dea..00000000 --- a/src/gradientai/types/droplets/backup_retrieve_policy_response.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..._models import BaseModel -from ..droplet_backup_policy import DropletBackupPolicy -from ..shared.droplet_next_backup_window import DropletNextBackupWindow - -__all__ = ["BackupRetrievePolicyResponse", "Policy"] - - -class Policy(BaseModel): - backup_enabled: Optional[bool] = None - """A boolean value indicating whether backups are enabled for the Droplet.""" - - backup_policy: Optional[DropletBackupPolicy] = None - """An object specifying the backup policy for the Droplet.""" - - droplet_id: Optional[int] = None - """The unique identifier for the Droplet.""" - - next_backup_window: Optional[DropletNextBackupWindow] = None - """ - An object containing keys with the start and end times of the window during - which the backup will occur. - """ - - -class BackupRetrievePolicyResponse(BaseModel): - policy: Optional[Policy] = None diff --git a/src/gradientai/types/droplets/current_utilization.py b/src/gradientai/types/droplets/current_utilization.py deleted file mode 100644 index f2cb0b6c..00000000 --- a/src/gradientai/types/droplets/current_utilization.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["CurrentUtilization"] - - -class CurrentUtilization(BaseModel): - cpu: Optional[float] = None - """The average CPU utilization of the autoscale pool.""" - - memory: Optional[float] = None - """The average memory utilization of the autoscale pool.""" diff --git a/src/gradientai/types/droplets/destroy_with_associated_resource_check_status_response.py b/src/gradientai/types/droplets/destroy_with_associated_resource_check_status_response.py deleted file mode 100644 index f2f2ff67..00000000 --- a/src/gradientai/types/droplets/destroy_with_associated_resource_check_status_response.py +++ /dev/null @@ -1,41 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. - -from typing import List, Optional -from datetime import datetime - -from ..._models import BaseModel -from .destroyed_associated_resource import DestroyedAssociatedResource - -__all__ = ["DestroyWithAssociatedResourceCheckStatusResponse", "Resources"] - - -class Resources(BaseModel): - floating_ips: Optional[List[DestroyedAssociatedResource]] = None - - reserved_ips: Optional[List[DestroyedAssociatedResource]] = None - - snapshots: Optional[List[DestroyedAssociatedResource]] = None - - volume_snapshots: Optional[List[DestroyedAssociatedResource]] = None - - volumes: Optional[List[DestroyedAssociatedResource]] = None - - -class DestroyWithAssociatedResourceCheckStatusResponse(BaseModel): - completed_at: Optional[datetime] = None - """ - A time value given in ISO8601 combined date and time format indicating when the - requested action was completed. - """ - - droplet: Optional[DestroyedAssociatedResource] = None - """An object containing information about a resource scheduled for deletion.""" - - failures: Optional[int] = None - """A count of the associated resources that failed to be destroyed, if any.""" - - resources: Optional[Resources] = None - """ - An object containing additional information about resource related to a Droplet - requested to be destroyed. - """ diff --git a/src/gradientai/types/droplets/destroy_with_associated_resource_delete_selective_params.py b/src/gradientai/types/droplets/destroy_with_associated_resource_delete_selective_params.py deleted file mode 100644 index f4037b6b..00000000 --- a/src/gradientai/types/droplets/destroy_with_associated_resource_delete_selective_params.py +++ /dev/null @@ -1,34 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import List -from typing_extensions import TypedDict - -__all__ = ["DestroyWithAssociatedResourceDeleteSelectiveParams"] - - -class DestroyWithAssociatedResourceDeleteSelectiveParams(TypedDict, total=False): - floating_ips: List[str] - """ - An array of unique identifiers for the floating IPs to be scheduled for - deletion. - """ - - reserved_ips: List[str] - """ - An array of unique identifiers for the reserved IPs to be scheduled for - deletion. - """ - - snapshots: List[str] - """An array of unique identifiers for the snapshots to be scheduled for deletion.""" - - volume_snapshots: List[str] - """ - An array of unique identifiers for the volume snapshots to be scheduled for - deletion. - """ - - volumes: List[str] - """An array of unique identifiers for the volumes to be scheduled for deletion.""" diff --git a/src/gradientai/types/droplets/destroy_with_associated_resource_list_response.py b/src/gradientai/types/droplets/destroy_with_associated_resource_list_response.py deleted file mode 100644 index ef4c6c99..00000000 --- a/src/gradientai/types/droplets/destroy_with_associated_resource_list_response.py +++ /dev/null @@ -1,37 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from ..._models import BaseModel -from .associated_resource import AssociatedResource - -__all__ = ["DestroyWithAssociatedResourceListResponse"] - - -class DestroyWithAssociatedResourceListResponse(BaseModel): - floating_ips: Optional[List[AssociatedResource]] = None - """ - Floating IPs that are associated with this Droplet. Requires `reserved_ip:read` - scope. - """ - - reserved_ips: Optional[List[AssociatedResource]] = None - """ - Reserved IPs that are associated with this Droplet. Requires `reserved_ip:read` - scope. - """ - - snapshots: Optional[List[AssociatedResource]] = None - """Snapshots that are associated with this Droplet. 
Requires `image:read` scope.""" - - volume_snapshots: Optional[List[AssociatedResource]] = None - """ - Volume Snapshots that are associated with this Droplet. Requires - `block_storage_snapshot:read` scope. - """ - - volumes: Optional[List[AssociatedResource]] = None - """ - Volumes that are associated with this Droplet. Requires `block_storage:read` - scope. - """ diff --git a/src/gradientai/types/droplets/destroyed_associated_resource.py b/src/gradientai/types/droplets/destroyed_associated_resource.py deleted file mode 100644 index 358c14e9..00000000 --- a/src/gradientai/types/droplets/destroyed_associated_resource.py +++ /dev/null @@ -1,28 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from datetime import datetime - -from ..._models import BaseModel - -__all__ = ["DestroyedAssociatedResource"] - - -class DestroyedAssociatedResource(BaseModel): - id: Optional[str] = None - """The unique identifier for the resource scheduled for deletion.""" - - destroyed_at: Optional[datetime] = None - """ - A time value given in ISO8601 combined date and time format indicating when the - resource was destroyed if the request was successful. - """ - - error_message: Optional[str] = None - """ - A string indicating that the resource was not successfully destroyed and - providing additional information. - """ - - name: Optional[str] = None - """The name of the resource scheduled for deletion.""" diff --git a/src/gradientai/types/firewall.py b/src/gradientai/types/firewall.py deleted file mode 100644 index 427d53b0..00000000 --- a/src/gradientai/types/firewall.py +++ /dev/null @@ -1,98 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from .._models import BaseModel -from .shared.firewall_rule_target import FirewallRuleTarget - -__all__ = ["Firewall", "InboundRule", "OutboundRule", "PendingChange"] - - -class InboundRule(BaseModel): - ports: str - """ - The ports on which traffic will be allowed specified as a string containing a - single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a - protocol. For ICMP rules this parameter will always return "0". - """ - - protocol: Literal["tcp", "udp", "icmp"] - """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`.""" - - sources: FirewallRuleTarget - """An object specifying locations from which inbound traffic will be accepted.""" - - -class OutboundRule(BaseModel): - destinations: FirewallRuleTarget - """An object specifying locations to which outbound traffic that will be allowed.""" - - ports: str - """ - The ports on which traffic will be allowed specified as a string containing a - single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a - protocol. For ICMP rules this parameter will always return "0". - """ - - protocol: Literal["tcp", "udp", "icmp"] - """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`.""" - - -class PendingChange(BaseModel): - droplet_id: Optional[int] = None - - removing: Optional[bool] = None - - status: Optional[str] = None - - -class Firewall(BaseModel): - id: Optional[str] = None - """A unique ID that can be used to identify and reference a firewall.""" - - created_at: Optional[datetime] = None - """ - A time value given in ISO8601 combined date and time format that represents when - the firewall was created. - """ - - droplet_ids: Optional[List[int]] = None - """An array containing the IDs of the Droplets assigned to the firewall. - - Requires `droplet:read` scope. 
- """ - - inbound_rules: Optional[List[InboundRule]] = None - - name: Optional[str] = None - """A human-readable name for a firewall. - - The name must begin with an alphanumeric character. Subsequent characters must - either be alphanumeric characters, a period (.), or a dash (-). - """ - - outbound_rules: Optional[List[OutboundRule]] = None - - pending_changes: Optional[List[PendingChange]] = None - """ - An array of objects each containing the fields "droplet_id", "removing", and - "status". It is provided to detail exactly which Droplets are having their - security policies updated. When empty, all changes have been successfully - applied. - """ - - status: Optional[Literal["waiting", "succeeded", "failed"]] = None - """A status string indicating the current state of the firewall. - - This can be "waiting", "succeeded", or "failed". - """ - - tags: Optional[List[str]] = None - """A flat array of tag names as strings to be applied to the resource. - - Tag names must exist in order to be referenced in a request. - - Requires `tag:create` and `tag:read` scopes. - """ diff --git a/src/gradientai/types/firewall_create_params.py b/src/gradientai/types/firewall_create_params.py deleted file mode 100644 index b10ae98e..00000000 --- a/src/gradientai/types/firewall_create_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import TypedDict - -from .firewall_param import FirewallParam - -__all__ = ["FirewallCreateParams", "Body"] - - -class FirewallCreateParams(TypedDict, total=False): - body: Body - - -class Body(FirewallParam, total=False): - pass diff --git a/src/gradientai/types/firewall_create_response.py b/src/gradientai/types/firewall_create_response.py deleted file mode 100644 index 8a9a2ff1..00000000 --- a/src/gradientai/types/firewall_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel -from .firewall import Firewall - -__all__ = ["FirewallCreateResponse"] - - -class FirewallCreateResponse(BaseModel): - firewall: Optional[Firewall] = None diff --git a/src/gradientai/types/firewall_list_params.py b/src/gradientai/types/firewall_list_params.py deleted file mode 100644 index 155cc480..00000000 --- a/src/gradientai/types/firewall_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["FirewallListParams"] - - -class FirewallListParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" diff --git a/src/gradientai/types/firewall_list_response.py b/src/gradientai/types/firewall_list_response.py deleted file mode 100644 index 27768083..00000000 --- a/src/gradientai/types/firewall_list_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from .._models import BaseModel -from .firewall import Firewall -from .shared.page_links import PageLinks -from .shared.meta_properties import MetaProperties - -__all__ = ["FirewallListResponse"] - - -class FirewallListResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - firewalls: Optional[List[Firewall]] = None - - links: Optional[PageLinks] = None diff --git a/src/gradientai/types/firewall_param.py b/src/gradientai/types/firewall_param.py deleted file mode 100644 index c92635d1..00000000 --- a/src/gradientai/types/firewall_param.py +++ /dev/null @@ -1,67 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Iterable, Optional -from typing_extensions import Literal, Required, TypedDict - -from .shared_params.firewall_rule_target import FirewallRuleTarget - -__all__ = ["FirewallParam", "InboundRule", "OutboundRule"] - - -class InboundRule(TypedDict, total=False): - ports: Required[str] - """ - The ports on which traffic will be allowed specified as a string containing a - single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a - protocol. For ICMP rules this parameter will always return "0". - """ - - protocol: Required[Literal["tcp", "udp", "icmp"]] - """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`.""" - - sources: Required[FirewallRuleTarget] - """An object specifying locations from which inbound traffic will be accepted.""" - - -class OutboundRule(TypedDict, total=False): - destinations: Required[FirewallRuleTarget] - """An object specifying locations to which outbound traffic that will be allowed.""" - - ports: Required[str] - """ - The ports on which traffic will be allowed specified as a string containing a - single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a - protocol. 
For ICMP rules this parameter will always return "0". - """ - - protocol: Required[Literal["tcp", "udp", "icmp"]] - """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`.""" - - -class FirewallParam(TypedDict, total=False): - droplet_ids: Optional[Iterable[int]] - """An array containing the IDs of the Droplets assigned to the firewall. - - Requires `droplet:read` scope. - """ - - inbound_rules: Optional[Iterable[InboundRule]] - - name: str - """A human-readable name for a firewall. - - The name must begin with an alphanumeric character. Subsequent characters must - either be alphanumeric characters, a period (.), or a dash (-). - """ - - outbound_rules: Optional[Iterable[OutboundRule]] - - tags: Optional[List[str]] - """A flat array of tag names as strings to be applied to the resource. - - Tag names must exist in order to be referenced in a request. - - Requires `tag:create` and `tag:read` scopes. - """ diff --git a/src/gradientai/types/firewall_retrieve_response.py b/src/gradientai/types/firewall_retrieve_response.py deleted file mode 100644 index a8bdfa07..00000000 --- a/src/gradientai/types/firewall_retrieve_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel -from .firewall import Firewall - -__all__ = ["FirewallRetrieveResponse"] - - -class FirewallRetrieveResponse(BaseModel): - firewall: Optional[Firewall] = None diff --git a/src/gradientai/types/firewall_update_params.py b/src/gradientai/types/firewall_update_params.py deleted file mode 100644 index c2d0691d..00000000 --- a/src/gradientai/types/firewall_update_params.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -from .firewall_param import FirewallParam - -__all__ = ["FirewallUpdateParams"] - - -class FirewallUpdateParams(TypedDict, total=False): - firewall: Required[FirewallParam] diff --git a/src/gradientai/types/firewall_update_response.py b/src/gradientai/types/firewall_update_response.py deleted file mode 100644 index d3f96601..00000000 --- a/src/gradientai/types/firewall_update_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel -from .firewall import Firewall - -__all__ = ["FirewallUpdateResponse"] - - -class FirewallUpdateResponse(BaseModel): - firewall: Optional[Firewall] = None diff --git a/src/gradientai/types/firewalls/__init__.py b/src/gradientai/types/firewalls/__init__.py deleted file mode 100644 index 6ba459d9..00000000 --- a/src/gradientai/types/firewalls/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .tag_add_params import TagAddParams as TagAddParams -from .rule_add_params import RuleAddParams as RuleAddParams -from .tag_remove_params import TagRemoveParams as TagRemoveParams -from .droplet_add_params import DropletAddParams as DropletAddParams -from .rule_remove_params import RuleRemoveParams as RuleRemoveParams -from .droplet_remove_params import DropletRemoveParams as DropletRemoveParams diff --git a/src/gradientai/types/firewalls/droplet_add_params.py b/src/gradientai/types/firewalls/droplet_add_params.py deleted file mode 100644 index 35a403a5..00000000 --- a/src/gradientai/types/firewalls/droplet_add_params.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Iterable -from typing_extensions import Required, TypedDict - -__all__ = ["DropletAddParams"] - - -class DropletAddParams(TypedDict, total=False): - droplet_ids: Required[Iterable[int]] - """An array containing the IDs of the Droplets to be assigned to the firewall.""" diff --git a/src/gradientai/types/firewalls/droplet_remove_params.py b/src/gradientai/types/firewalls/droplet_remove_params.py deleted file mode 100644 index 5aea18e8..00000000 --- a/src/gradientai/types/firewalls/droplet_remove_params.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Iterable -from typing_extensions import Required, TypedDict - -__all__ = ["DropletRemoveParams"] - - -class DropletRemoveParams(TypedDict, total=False): - droplet_ids: Required[Iterable[int]] - """An array containing the IDs of the Droplets to be removed from the firewall.""" diff --git a/src/gradientai/types/firewalls/rule_add_params.py b/src/gradientai/types/firewalls/rule_add_params.py deleted file mode 100644 index fd405c61..00000000 --- a/src/gradientai/types/firewalls/rule_add_params.py +++ /dev/null @@ -1,46 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Iterable, Optional -from typing_extensions import Literal, Required, TypedDict - -from ..shared_params.firewall_rule_target import FirewallRuleTarget - -__all__ = ["RuleAddParams", "InboundRule", "OutboundRule"] - - -class RuleAddParams(TypedDict, total=False): - inbound_rules: Optional[Iterable[InboundRule]] - - outbound_rules: Optional[Iterable[OutboundRule]] - - -class InboundRule(TypedDict, total=False): - ports: Required[str] - """ - The ports on which traffic will be allowed specified as a string containing a - single port, a range (e.g. 
"8000-9000"), or "0" when all ports are open for a - protocol. For ICMP rules this parameter will always return "0". - """ - - protocol: Required[Literal["tcp", "udp", "icmp"]] - """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`.""" - - sources: Required[FirewallRuleTarget] - """An object specifying locations from which inbound traffic will be accepted.""" - - -class OutboundRule(TypedDict, total=False): - destinations: Required[FirewallRuleTarget] - """An object specifying locations to which outbound traffic that will be allowed.""" - - ports: Required[str] - """ - The ports on which traffic will be allowed specified as a string containing a - single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a - protocol. For ICMP rules this parameter will always return "0". - """ - - protocol: Required[Literal["tcp", "udp", "icmp"]] - """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`.""" diff --git a/src/gradientai/types/firewalls/rule_remove_params.py b/src/gradientai/types/firewalls/rule_remove_params.py deleted file mode 100644 index 93911e8e..00000000 --- a/src/gradientai/types/firewalls/rule_remove_params.py +++ /dev/null @@ -1,46 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Iterable, Optional -from typing_extensions import Literal, Required, TypedDict - -from ..shared_params.firewall_rule_target import FirewallRuleTarget - -__all__ = ["RuleRemoveParams", "InboundRule", "OutboundRule"] - - -class RuleRemoveParams(TypedDict, total=False): - inbound_rules: Optional[Iterable[InboundRule]] - - outbound_rules: Optional[Iterable[OutboundRule]] - - -class InboundRule(TypedDict, total=False): - ports: Required[str] - """ - The ports on which traffic will be allowed specified as a string containing a - single port, a range (e.g. 
"8000-9000"), or "0" when all ports are open for a - protocol. For ICMP rules this parameter will always return "0". - """ - - protocol: Required[Literal["tcp", "udp", "icmp"]] - """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`.""" - - sources: Required[FirewallRuleTarget] - """An object specifying locations from which inbound traffic will be accepted.""" - - -class OutboundRule(TypedDict, total=False): - destinations: Required[FirewallRuleTarget] - """An object specifying locations to which outbound traffic that will be allowed.""" - - ports: Required[str] - """ - The ports on which traffic will be allowed specified as a string containing a - single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a - protocol. For ICMP rules this parameter will always return "0". - """ - - protocol: Required[Literal["tcp", "udp", "icmp"]] - """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`.""" diff --git a/src/gradientai/types/firewalls/tag_add_params.py b/src/gradientai/types/firewalls/tag_add_params.py deleted file mode 100644 index 63af7640..00000000 --- a/src/gradientai/types/firewalls/tag_add_params.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Optional -from typing_extensions import Required, TypedDict - -__all__ = ["TagAddParams"] - - -class TagAddParams(TypedDict, total=False): - tags: Required[Optional[List[str]]] - """A flat array of tag names as strings to be applied to the resource. - - Tag names must exist in order to be referenced in a request. - - Requires `tag:create` and `tag:read` scopes. 
- """ diff --git a/src/gradientai/types/firewalls/tag_remove_params.py b/src/gradientai/types/firewalls/tag_remove_params.py deleted file mode 100644 index 91a3e382..00000000 --- a/src/gradientai/types/firewalls/tag_remove_params.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Optional -from typing_extensions import Required, TypedDict - -__all__ = ["TagRemoveParams"] - - -class TagRemoveParams(TypedDict, total=False): - tags: Required[Optional[List[str]]] - """A flat array of tag names as strings to be applied to the resource. - - Tag names must exist in order to be referenced in a request. - - Requires `tag:create` and `tag:read` scopes. - """ diff --git a/src/gradientai/types/floating_ip.py b/src/gradientai/types/floating_ip.py deleted file mode 100644 index 6bfee5b0..00000000 --- a/src/gradientai/types/floating_ip.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Union, Optional -from typing_extensions import TypeAlias - -from .shared import region, droplet -from .._models import BaseModel - -__all__ = ["FloatingIP", "Droplet", "Region"] - -Droplet: TypeAlias = Union[droplet.Droplet, Optional[object]] - - -class Region(region.Region): - pass - - -class FloatingIP(BaseModel): - droplet: Optional[Droplet] = None - """The Droplet that the floating IP has been assigned to. - - When you query a floating IP, if it is assigned to a Droplet, the entire Droplet - object will be returned. If it is not assigned, the value will be null. - - Requires `droplet:read` scope. - """ - - ip: Optional[str] = None - """The public IP address of the floating IP. 
It also serves as its identifier.""" - - locked: Optional[bool] = None - """ - A boolean value indicating whether or not the floating IP has pending actions - preventing new ones from being submitted. - """ - - project_id: Optional[str] = None - """The UUID of the project to which the reserved IP currently belongs. - - Requires `project:read` scope. - """ - - region: Optional[Region] = None - """The region that the floating IP is reserved to. - - When you query a floating IP, the entire region object will be returned. - """ diff --git a/src/gradientai/types/floating_ip_create_params.py b/src/gradientai/types/floating_ip_create_params.py deleted file mode 100644 index 2adadc27..00000000 --- a/src/gradientai/types/floating_ip_create_params.py +++ /dev/null @@ -1,24 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import Required, TypeAlias, TypedDict - -__all__ = ["FloatingIPCreateParams", "AssignToDroplet", "ReserveToRegion"] - - -class AssignToDroplet(TypedDict, total=False): - droplet_id: Required[int] - """The ID of the Droplet that the floating IP will be assigned to.""" - - -class ReserveToRegion(TypedDict, total=False): - region: Required[str] - """The slug identifier for the region the floating IP will be reserved to.""" - - project_id: str - """The UUID of the project to which the floating IP will be assigned.""" - - -FloatingIPCreateParams: TypeAlias = Union[AssignToDroplet, ReserveToRegion] diff --git a/src/gradientai/types/floating_ip_create_response.py b/src/gradientai/types/floating_ip_create_response.py deleted file mode 100644 index fab8c06b..00000000 --- a/src/gradientai/types/floating_ip_create_response.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from .._models import BaseModel -from .floating_ip import FloatingIP -from .shared.action_link import ActionLink - -__all__ = ["FloatingIPCreateResponse", "Links"] - - -class Links(BaseModel): - actions: Optional[List[ActionLink]] = None - - droplets: Optional[List[ActionLink]] = None - - -class FloatingIPCreateResponse(BaseModel): - floating_ip: Optional[FloatingIP] = None - - links: Optional[Links] = None diff --git a/src/gradientai/types/floating_ip_list_params.py b/src/gradientai/types/floating_ip_list_params.py deleted file mode 100644 index 2e054075..00000000 --- a/src/gradientai/types/floating_ip_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["FloatingIPListParams"] - - -class FloatingIPListParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" diff --git a/src/gradientai/types/floating_ip_list_response.py b/src/gradientai/types/floating_ip_list_response.py deleted file mode 100644 index 8535fa24..00000000 --- a/src/gradientai/types/floating_ip_list_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from .._models import BaseModel -from .floating_ip import FloatingIP -from .shared.page_links import PageLinks -from .shared.meta_properties import MetaProperties - -__all__ = ["FloatingIPListResponse"] - - -class FloatingIPListResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - floating_ips: Optional[List[FloatingIP]] = None - - links: Optional[PageLinks] = None diff --git a/src/gradientai/types/floating_ip_retrieve_response.py b/src/gradientai/types/floating_ip_retrieve_response.py deleted file mode 100644 index 98bbbb2a..00000000 --- a/src/gradientai/types/floating_ip_retrieve_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel -from .floating_ip import FloatingIP - -__all__ = ["FloatingIPRetrieveResponse"] - - -class FloatingIPRetrieveResponse(BaseModel): - floating_ip: Optional[FloatingIP] = None diff --git a/src/gradientai/types/floating_ips/__init__.py b/src/gradientai/types/floating_ips/__init__.py deleted file mode 100644 index a597418e..00000000 --- a/src/gradientai/types/floating_ips/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from .action_create_params import ActionCreateParams as ActionCreateParams -from .action_list_response import ActionListResponse as ActionListResponse -from .action_create_response import ActionCreateResponse as ActionCreateResponse -from .action_retrieve_response import ActionRetrieveResponse as ActionRetrieveResponse diff --git a/src/gradientai/types/floating_ips/action_create_params.py b/src/gradientai/types/floating_ips/action_create_params.py deleted file mode 100644 index c84f5df7..00000000 --- a/src/gradientai/types/floating_ips/action_create_params.py +++ /dev/null @@ -1,24 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -__all__ = ["ActionCreateParams", "FloatingIPActionUnassign", "FloatingIPActionAssign"] - - -class FloatingIPActionUnassign(TypedDict, total=False): - type: Required[Literal["assign", "unassign"]] - """The type of action to initiate for the floating IP.""" - - -class FloatingIPActionAssign(TypedDict, total=False): - droplet_id: Required[int] - """The ID of the Droplet that the floating IP will be assigned to.""" - - type: Required[Literal["assign", "unassign"]] - """The type of action to initiate for the floating IP.""" - - -ActionCreateParams: TypeAlias = Union[FloatingIPActionUnassign, FloatingIPActionAssign] diff --git a/src/gradientai/types/floating_ips/action_create_response.py b/src/gradientai/types/floating_ips/action_create_response.py deleted file mode 100644 index 5f68724f..00000000 --- a/src/gradientai/types/floating_ips/action_create_response.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..shared import action -from ..._models import BaseModel - -__all__ = ["ActionCreateResponse", "Action"] - - -class Action(action.Action): - project_id: Optional[str] = None - """The UUID of the project to which the reserved IP currently belongs.""" - - -class ActionCreateResponse(BaseModel): - action: Optional[Action] = None diff --git a/src/gradientai/types/floating_ips/action_list_response.py b/src/gradientai/types/floating_ips/action_list_response.py deleted file mode 100644 index 1a20f780..00000000 --- a/src/gradientai/types/floating_ips/action_list_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from ..._models import BaseModel -from ..shared.action import Action -from ..shared.page_links import PageLinks -from ..shared.meta_properties import MetaProperties - -__all__ = ["ActionListResponse"] - - -class ActionListResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - actions: Optional[List[Action]] = None - - links: Optional[PageLinks] = None diff --git a/src/gradientai/types/floating_ips/action_retrieve_response.py b/src/gradientai/types/floating_ips/action_retrieve_response.py deleted file mode 100644 index 493b62a7..00000000 --- a/src/gradientai/types/floating_ips/action_retrieve_response.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..shared import action -from ..._models import BaseModel - -__all__ = ["ActionRetrieveResponse", "Action"] - - -class Action(action.Action): - project_id: Optional[str] = None - """The UUID of the project to which the reserved IP currently belongs.""" - - -class ActionRetrieveResponse(BaseModel): - action: Optional[Action] = None diff --git a/src/gradientai/types/forwarding_rule.py b/src/gradientai/types/forwarding_rule.py deleted file mode 100644 index 38da45d8..00000000 --- a/src/gradientai/types/forwarding_rule.py +++ /dev/null @@ -1,49 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["ForwardingRule"] - - -class ForwardingRule(BaseModel): - entry_port: int - """ - An integer representing the port on which the load balancer instance will - listen. - """ - - entry_protocol: Literal["http", "https", "http2", "http3", "tcp", "udp"] - """The protocol used for traffic to the load balancer. - - The possible values are: `http`, `https`, `http2`, `http3`, `tcp`, or `udp`. If - you set the `entry_protocol` to `udp`, the `target_protocol` must be set to - `udp`. When using UDP, the load balancer requires that you set up a health check - with a port that uses TCP, HTTP, or HTTPS to work properly. - """ - - target_port: int - """ - An integer representing the port on the backend Droplets to which the load - balancer will send traffic. - """ - - target_protocol: Literal["http", "https", "http2", "tcp", "udp"] - """The protocol used for traffic from the load balancer to the backend Droplets. - - The possible values are: `http`, `https`, `http2`, `tcp`, or `udp`. If you set - the `target_protocol` to `udp`, the `entry_protocol` must be set to `udp`. 
When - using UDP, the load balancer requires that you set up a health check with a port - that uses TCP, HTTP, or HTTPS to work properly. - """ - - certificate_id: Optional[str] = None - """The ID of the TLS certificate used for SSL termination if enabled.""" - - tls_passthrough: Optional[bool] = None - """ - A boolean value indicating whether SSL encrypted traffic will be passed through - to the backend Droplets. - """ diff --git a/src/gradientai/types/forwarding_rule_param.py b/src/gradientai/types/forwarding_rule_param.py deleted file mode 100644 index 70285bf6..00000000 --- a/src/gradientai/types/forwarding_rule_param.py +++ /dev/null @@ -1,48 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["ForwardingRuleParam"] - - -class ForwardingRuleParam(TypedDict, total=False): - entry_port: Required[int] - """ - An integer representing the port on which the load balancer instance will - listen. - """ - - entry_protocol: Required[Literal["http", "https", "http2", "http3", "tcp", "udp"]] - """The protocol used for traffic to the load balancer. - - The possible values are: `http`, `https`, `http2`, `http3`, `tcp`, or `udp`. If - you set the `entry_protocol` to `udp`, the `target_protocol` must be set to - `udp`. When using UDP, the load balancer requires that you set up a health check - with a port that uses TCP, HTTP, or HTTPS to work properly. - """ - - target_port: Required[int] - """ - An integer representing the port on the backend Droplets to which the load - balancer will send traffic. - """ - - target_protocol: Required[Literal["http", "https", "http2", "tcp", "udp"]] - """The protocol used for traffic from the load balancer to the backend Droplets. - - The possible values are: `http`, `https`, `http2`, `tcp`, or `udp`. If you set - the `target_protocol` to `udp`, the `entry_protocol` must be set to `udp`. 
When - using UDP, the load balancer requires that you set up a health check with a port - that uses TCP, HTTP, or HTTPS to work properly. - """ - - certificate_id: str - """The ID of the TLS certificate used for SSL termination if enabled.""" - - tls_passthrough: bool - """ - A boolean value indicating whether SSL encrypted traffic will be passed through - to the backend Droplets. - """ diff --git a/src/gradientai/types/glb_settings.py b/src/gradientai/types/glb_settings.py deleted file mode 100644 index 164b75af..00000000 --- a/src/gradientai/types/glb_settings.py +++ /dev/null @@ -1,45 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, Optional -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["GlbSettings", "Cdn"] - - -class Cdn(BaseModel): - is_enabled: Optional[bool] = None - """A boolean flag to enable CDN caching.""" - - -class GlbSettings(BaseModel): - cdn: Optional[Cdn] = None - """An object specifying CDN configurations for a Global load balancer.""" - - failover_threshold: Optional[int] = None - """ - An integer value as a percentage to indicate failure threshold to decide how the - regional priorities will take effect. A value of `50` would indicate that the - Global load balancer will choose a lower priority region to forward traffic to - once this failure threshold has been reached for the higher priority region. - """ - - region_priorities: Optional[Dict[str, int]] = None - """ - A map of region string to an integer priority value indicating preference for - which regional target a Global load balancer will forward traffic to. A lower - value indicates a higher priority. - """ - - target_port: Optional[int] = None - """ - An integer representing the port on the target backends which the load balancer - will forward traffic to. 
- """ - - target_protocol: Optional[Literal["http", "https", "http2"]] = None - """ - The protocol used for forwarding traffic from the load balancer to the target - backends. The possible values are `http`, `https` and `http2`. - """ diff --git a/src/gradientai/types/glb_settings_param.py b/src/gradientai/types/glb_settings_param.py deleted file mode 100644 index f1b25c8b..00000000 --- a/src/gradientai/types/glb_settings_param.py +++ /dev/null @@ -1,45 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict -from typing_extensions import Literal, TypedDict - -__all__ = ["GlbSettingsParam", "Cdn"] - - -class Cdn(TypedDict, total=False): - is_enabled: bool - """A boolean flag to enable CDN caching.""" - - -class GlbSettingsParam(TypedDict, total=False): - cdn: Cdn - """An object specifying CDN configurations for a Global load balancer.""" - - failover_threshold: int - """ - An integer value as a percentage to indicate failure threshold to decide how the - regional priorities will take effect. A value of `50` would indicate that the - Global load balancer will choose a lower priority region to forward traffic to - once this failure threshold has been reached for the higher priority region. - """ - - region_priorities: Dict[str, int] - """ - A map of region string to an integer priority value indicating preference for - which regional target a Global load balancer will forward traffic to. A lower - value indicates a higher priority. - """ - - target_port: int - """ - An integer representing the port on the target backends which the load balancer - will forward traffic to. - """ - - target_protocol: Literal["http", "https", "http2"] - """ - The protocol used for forwarding traffic from the load balancer to the target - backends. The possible values are `http`, `https` and `http2`. 
- """ diff --git a/src/gradientai/types/health_check.py b/src/gradientai/types/health_check.py deleted file mode 100644 index 3f167fb8..00000000 --- a/src/gradientai/types/health_check.py +++ /dev/null @@ -1,49 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["HealthCheck"] - - -class HealthCheck(BaseModel): - check_interval_seconds: Optional[int] = None - """The number of seconds between between two consecutive health checks.""" - - healthy_threshold: Optional[int] = None - """ - The number of times a health check must pass for a backend Droplet to be marked - "healthy" and be re-added to the pool. - """ - - path: Optional[str] = None - """ - The path on the backend Droplets to which the load balancer instance will send a - request. - """ - - port: Optional[int] = None - """ - An integer representing the port on the backend Droplets on which the health - check will attempt a connection. - """ - - protocol: Optional[Literal["http", "https", "tcp"]] = None - """The protocol used for health checks sent to the backend Droplets. - - The possible values are `http`, `https`, or `tcp`. - """ - - response_timeout_seconds: Optional[int] = None - """ - The number of seconds the load balancer instance will wait for a response until - marking a health check as failed. - """ - - unhealthy_threshold: Optional[int] = None - """ - The number of times a health check must fail for a backend Droplet to be marked - "unhealthy" and be removed from the pool. - """ diff --git a/src/gradientai/types/health_check_param.py b/src/gradientai/types/health_check_param.py deleted file mode 100644 index e840f818..00000000 --- a/src/gradientai/types/health_check_param.py +++ /dev/null @@ -1,48 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["HealthCheckParam"] - - -class HealthCheckParam(TypedDict, total=False): - check_interval_seconds: int - """The number of seconds between between two consecutive health checks.""" - - healthy_threshold: int - """ - The number of times a health check must pass for a backend Droplet to be marked - "healthy" and be re-added to the pool. - """ - - path: str - """ - The path on the backend Droplets to which the load balancer instance will send a - request. - """ - - port: int - """ - An integer representing the port on the backend Droplets on which the health - check will attempt a connection. - """ - - protocol: Literal["http", "https", "tcp"] - """The protocol used for health checks sent to the backend Droplets. - - The possible values are `http`, `https`, or `tcp`. - """ - - response_timeout_seconds: int - """ - The number of seconds the load balancer instance will wait for a response until - marking a health check as failed. - """ - - unhealthy_threshold: int - """ - The number of times a health check must fail for a backend Droplet to be marked - "unhealthy" and be removed from the pool. - """ diff --git a/src/gradientai/types/image_create_params.py b/src/gradientai/types/image_create_params.py deleted file mode 100644 index efbd684c..00000000 --- a/src/gradientai/types/image_create_params.py +++ /dev/null @@ -1,81 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import List, Optional -from typing_extensions import Literal, TypedDict - -__all__ = ["ImageCreateParams"] - - -class ImageCreateParams(TypedDict, total=False): - description: str - """An optional free-form text field to describe an image.""" - - distribution: Literal[ - "Arch Linux", - "CentOS", - "CoreOS", - "Debian", - "Fedora", - "Fedora Atomic", - "FreeBSD", - "Gentoo", - "openSUSE", - "RancherOS", - "Rocky Linux", - "Ubuntu", - "Unknown", - ] - """The name of a custom image's distribution. - - Currently, the valid values are `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, - `Fedora`, `Fedora Atomic`, `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, - `Rocky Linux`, `Ubuntu`, and `Unknown`. Any other value will be accepted but - ignored, and `Unknown` will be used in its place. - """ - - name: str - """The display name that has been given to an image. - - This is what is shown in the control panel and is generally a descriptive title - for the image in question. - """ - - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - """ - The slug identifier for the region where the resource will initially be - available. - """ - - tags: Optional[List[str]] - """A flat array of tag names as strings to be applied to the resource. - - Tag names may be for either existing or new tags. - - Requires `tag:create` scope. - """ - - url: str - """A URL from which the custom Linux virtual machine image may be retrieved. - - The image it points to must be in the raw, qcow2, vhdx, vdi, or vmdk format. It - may be compressed using gzip or bzip2 and must be smaller than 100 GB after - being decompressed. 
- """ diff --git a/src/gradientai/types/image_create_response.py b/src/gradientai/types/image_create_response.py deleted file mode 100644 index 57c96cf5..00000000 --- a/src/gradientai/types/image_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel -from .shared.image import Image - -__all__ = ["ImageCreateResponse"] - - -class ImageCreateResponse(BaseModel): - image: Optional[Image] = None diff --git a/src/gradientai/types/image_list_params.py b/src/gradientai/types/image_list_params.py deleted file mode 100644 index d8e90efa..00000000 --- a/src/gradientai/types/image_list_params.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["ImageListParams"] - - -class ImageListParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" - - private: bool - """Used to filter only user images.""" - - tag_name: str - """Used to filter images by a specific tag.""" - - type: Literal["application", "distribution"] - """ - Filters results based on image type which can be either `application` or - `distribution`. - """ diff --git a/src/gradientai/types/image_list_response.py b/src/gradientai/types/image_list_response.py deleted file mode 100644 index 0e3f7324..00000000 --- a/src/gradientai/types/image_list_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from .._models import BaseModel -from .shared.image import Image -from .shared.page_links import PageLinks -from .shared.meta_properties import MetaProperties - -__all__ = ["ImageListResponse"] - - -class ImageListResponse(BaseModel): - images: List[Image] - - meta: MetaProperties - """Information about the response itself.""" - - links: Optional[PageLinks] = None diff --git a/src/gradientai/types/image_retrieve_response.py b/src/gradientai/types/image_retrieve_response.py deleted file mode 100644 index 761d6184..00000000 --- a/src/gradientai/types/image_retrieve_response.py +++ /dev/null @@ -1,10 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .._models import BaseModel -from .shared.image import Image - -__all__ = ["ImageRetrieveResponse"] - - -class ImageRetrieveResponse(BaseModel): - image: Image diff --git a/src/gradientai/types/image_update_params.py b/src/gradientai/types/image_update_params.py deleted file mode 100644 index 2ff851f8..00000000 --- a/src/gradientai/types/image_update_params.py +++ /dev/null @@ -1,42 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["ImageUpdateParams"] - - -class ImageUpdateParams(TypedDict, total=False): - description: str - """An optional free-form text field to describe an image.""" - - distribution: Literal[ - "Arch Linux", - "CentOS", - "CoreOS", - "Debian", - "Fedora", - "Fedora Atomic", - "FreeBSD", - "Gentoo", - "openSUSE", - "RancherOS", - "Rocky Linux", - "Ubuntu", - "Unknown", - ] - """The name of a custom image's distribution. - - Currently, the valid values are `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, - `Fedora`, `Fedora Atomic`, `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, - `Rocky Linux`, `Ubuntu`, and `Unknown`. 
Any other value will be accepted but - ignored, and `Unknown` will be used in its place. - """ - - name: str - """The display name that has been given to an image. - - This is what is shown in the control panel and is generally a descriptive title - for the image in question. - """ diff --git a/src/gradientai/types/image_update_response.py b/src/gradientai/types/image_update_response.py deleted file mode 100644 index 22db593b..00000000 --- a/src/gradientai/types/image_update_response.py +++ /dev/null @@ -1,10 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .._models import BaseModel -from .shared.image import Image - -__all__ = ["ImageUpdateResponse"] - - -class ImageUpdateResponse(BaseModel): - image: Image diff --git a/src/gradientai/types/images/__init__.py b/src/gradientai/types/images/__init__.py deleted file mode 100644 index 7e78954c..00000000 --- a/src/gradientai/types/images/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .action_create_params import ActionCreateParams as ActionCreateParams -from .action_list_response import ActionListResponse as ActionListResponse diff --git a/src/gradientai/types/images/action_create_params.py b/src/gradientai/types/images/action_create_params.py deleted file mode 100644 index a1b57d47..00000000 --- a/src/gradientai/types/images/action_create_params.py +++ /dev/null @@ -1,45 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -__all__ = ["ActionCreateParams", "ImageActionBase", "ImageActionTransfer"] - - -class ImageActionBase(TypedDict, total=False): - type: Required[Literal["convert", "transfer"]] - """The action to be taken on the image. 
Can be either `convert` or `transfer`.""" - - -class ImageActionTransfer(TypedDict, total=False): - region: Required[ - Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - ] - """ - The slug identifier for the region where the resource will initially be - available. - """ - - type: Required[Literal["convert", "transfer"]] - """The action to be taken on the image. Can be either `convert` or `transfer`.""" - - -ActionCreateParams: TypeAlias = Union[ImageActionBase, ImageActionTransfer] diff --git a/src/gradientai/types/images/action_list_response.py b/src/gradientai/types/images/action_list_response.py deleted file mode 100644 index 1a20f780..00000000 --- a/src/gradientai/types/images/action_list_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from ..._models import BaseModel -from ..shared.action import Action -from ..shared.page_links import PageLinks -from ..shared.meta_properties import MetaProperties - -__all__ = ["ActionListResponse"] - - -class ActionListResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - actions: Optional[List[Action]] = None - - links: Optional[PageLinks] = None diff --git a/src/gradientai/types/inference/api_key_create_params.py b/src/gradientai/types/inference/api_key_create_params.py index 10edfbbe..16cc23c9 100644 --- a/src/gradientai/types/inference/api_key_create_params.py +++ b/src/gradientai/types/inference/api_key_create_params.py @@ -9,4 +9,3 @@ class APIKeyCreateParams(TypedDict, total=False): name: str - """A human friendly name to identify the key""" diff --git a/src/gradientai/types/inference/api_key_create_response.py b/src/gradientai/types/inference/api_key_create_response.py index f2469e43..654e9f1e 100644 --- 
a/src/gradientai/types/inference/api_key_create_response.py +++ b/src/gradientai/types/inference/api_key_create_response.py @@ -10,4 +10,3 @@ class APIKeyCreateResponse(BaseModel): api_key_info: Optional[APIModelAPIKeyInfo] = None - """Model API Key Info""" diff --git a/src/gradientai/types/inference/api_key_delete_response.py b/src/gradientai/types/inference/api_key_delete_response.py index 89102258..4d81d047 100644 --- a/src/gradientai/types/inference/api_key_delete_response.py +++ b/src/gradientai/types/inference/api_key_delete_response.py @@ -10,4 +10,3 @@ class APIKeyDeleteResponse(BaseModel): api_key_info: Optional[APIModelAPIKeyInfo] = None - """Model API Key Info""" diff --git a/src/gradientai/types/inference/api_key_list_params.py b/src/gradientai/types/inference/api_key_list_params.py index 1f8f96b7..11da9398 100644 --- a/src/gradientai/types/inference/api_key_list_params.py +++ b/src/gradientai/types/inference/api_key_list_params.py @@ -9,7 +9,7 @@ class APIKeyListParams(TypedDict, total=False): page: int - """Page number.""" + """page number.""" per_page: int - """Items per page.""" + """items per page.""" diff --git a/src/gradientai/types/inference/api_key_list_response.py b/src/gradientai/types/inference/api_key_list_response.py index 7c474873..3e937950 100644 --- a/src/gradientai/types/inference/api_key_list_response.py +++ b/src/gradientai/types/inference/api_key_list_response.py @@ -12,10 +12,7 @@ class APIKeyListResponse(BaseModel): api_key_infos: Optional[List[APIModelAPIKeyInfo]] = None - """Api key infos""" links: Optional[APILinks] = None - """Links to other pages""" meta: Optional[APIMeta] = None - """Meta information about the data set""" diff --git a/src/gradientai/types/inference/api_key_update_params.py b/src/gradientai/types/inference/api_key_update_params.py index 7f79240a..23c1c0b9 100644 --- a/src/gradientai/types/inference/api_key_update_params.py +++ b/src/gradientai/types/inference/api_key_update_params.py @@ -11,7 +11,5 @@ class 
APIKeyUpdateParams(TypedDict, total=False): body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] - """API key ID""" name: str - """Name""" diff --git a/src/gradientai/types/inference/api_key_update_regenerate_response.py b/src/gradientai/types/inference/api_key_update_regenerate_response.py index c7ce5f0a..44a316dc 100644 --- a/src/gradientai/types/inference/api_key_update_regenerate_response.py +++ b/src/gradientai/types/inference/api_key_update_regenerate_response.py @@ -10,4 +10,3 @@ class APIKeyUpdateRegenerateResponse(BaseModel): api_key_info: Optional[APIModelAPIKeyInfo] = None - """Model API Key Info""" diff --git a/src/gradientai/types/inference/api_key_update_response.py b/src/gradientai/types/inference/api_key_update_response.py index 1b7f92ef..3671addf 100644 --- a/src/gradientai/types/inference/api_key_update_response.py +++ b/src/gradientai/types/inference/api_key_update_response.py @@ -10,4 +10,3 @@ class APIKeyUpdateResponse(BaseModel): api_key_info: Optional[APIModelAPIKeyInfo] = None - """Model API Key Info""" diff --git a/src/gradientai/types/inference/api_model_api_key_info.py b/src/gradientai/types/inference/api_model_api_key_info.py index 3da1c70a..bf354a47 100644 --- a/src/gradientai/types/inference/api_model_api_key_info.py +++ b/src/gradientai/types/inference/api_model_api_key_info.py @@ -10,18 +10,13 @@ class APIModelAPIKeyInfo(BaseModel): created_at: Optional[datetime] = None - """Creation date""" created_by: Optional[str] = None - """Created by""" deleted_at: Optional[datetime] = None - """Deleted date""" name: Optional[str] = None - """Name""" secret_key: Optional[str] = None uuid: Optional[str] = None - """Uuid""" diff --git a/src/gradientai/types/knowledge_base_create_params.py b/src/gradientai/types/knowledge_base_create_params.py index 9ecd777d..acf52e30 100644 --- a/src/gradientai/types/knowledge_base_create_params.py +++ b/src/gradientai/types/knowledge_base_create_params.py @@ -48,18 +48,14 @@ class 
KnowledgeBaseCreateParams(TypedDict, total=False): """Tags to organize your knowledge base.""" vpc_uuid: str - """The VPC to deploy the knowledge base database in""" class Datasource(TypedDict, total=False): aws_data_source: AwsDataSourceParam - """AWS S3 Data Source""" bucket_name: str - """Deprecated, moved to data_source_details""" bucket_region: str - """Deprecated, moved to data_source_details""" file_upload_data_source: APIFileUploadDataSourceParam """File to upload as data source for knowledge base.""" @@ -67,7 +63,5 @@ class Datasource(TypedDict, total=False): item_path: str spaces_data_source: APISpacesDataSourceParam - """Spaces Bucket Data Source""" web_crawler_data_source: APIWebCrawlerDataSourceParam - """WebCrawlerDataSource""" diff --git a/src/gradientai/types/knowledge_base_create_response.py b/src/gradientai/types/knowledge_base_create_response.py index 6d846fa5..cc2d8b9f 100644 --- a/src/gradientai/types/knowledge_base_create_response.py +++ b/src/gradientai/types/knowledge_base_create_response.py @@ -10,4 +10,3 @@ class KnowledgeBaseCreateResponse(BaseModel): knowledge_base: Optional[APIKnowledgeBase] = None - """Knowledgebase Description""" diff --git a/src/gradientai/types/knowledge_base_delete_response.py b/src/gradientai/types/knowledge_base_delete_response.py index b0605a20..6401e25a 100644 --- a/src/gradientai/types/knowledge_base_delete_response.py +++ b/src/gradientai/types/knowledge_base_delete_response.py @@ -9,4 +9,3 @@ class KnowledgeBaseDeleteResponse(BaseModel): uuid: Optional[str] = None - """The id of the deleted knowledge base""" diff --git a/src/gradientai/types/knowledge_base_list_params.py b/src/gradientai/types/knowledge_base_list_params.py index b2c0eb31..dcf9a0ec 100644 --- a/src/gradientai/types/knowledge_base_list_params.py +++ b/src/gradientai/types/knowledge_base_list_params.py @@ -9,7 +9,7 @@ class KnowledgeBaseListParams(TypedDict, total=False): page: int - """Page number.""" + """page number.""" per_page: int - 
"""Items per page.""" + """items per page.""" diff --git a/src/gradientai/types/knowledge_base_list_response.py b/src/gradientai/types/knowledge_base_list_response.py index 08227316..e8998b51 100644 --- a/src/gradientai/types/knowledge_base_list_response.py +++ b/src/gradientai/types/knowledge_base_list_response.py @@ -12,10 +12,7 @@ class KnowledgeBaseListResponse(BaseModel): knowledge_bases: Optional[List[APIKnowledgeBase]] = None - """The knowledge bases""" links: Optional[APILinks] = None - """Links to other pages""" meta: Optional[APIMeta] = None - """Meta information about the data set""" diff --git a/src/gradientai/types/knowledge_base_retrieve_response.py b/src/gradientai/types/knowledge_base_retrieve_response.py index 55994f70..5a3b5f2c 100644 --- a/src/gradientai/types/knowledge_base_retrieve_response.py +++ b/src/gradientai/types/knowledge_base_retrieve_response.py @@ -28,4 +28,3 @@ class KnowledgeBaseRetrieveResponse(BaseModel): ] = None knowledge_base: Optional[APIKnowledgeBase] = None - """Knowledgebase Description""" diff --git a/src/gradientai/types/knowledge_base_update_params.py b/src/gradientai/types/knowledge_base_update_params.py index 7a86b40c..297c79de 100644 --- a/src/gradientai/types/knowledge_base_update_params.py +++ b/src/gradientai/types/knowledge_base_update_params.py @@ -12,19 +12,16 @@ class KnowledgeBaseUpdateParams(TypedDict, total=False): database_id: str - """The id of the DigitalOcean database this knowledge base will use, optiona.""" + """the id of the DigitalOcean database this knowledge base will use, optiona.""" embedding_model_uuid: str """Identifier for the foundation model.""" name: str - """Knowledge base name""" project_id: str - """The id of the DigitalOcean project this knowledge base will belong to""" tags: List[str] """Tags to organize your knowledge base.""" body_uuid: Annotated[str, PropertyInfo(alias="uuid")] - """Knowledge base id""" diff --git a/src/gradientai/types/knowledge_base_update_response.py 
b/src/gradientai/types/knowledge_base_update_response.py index 0840622c..f3ba2c32 100644 --- a/src/gradientai/types/knowledge_base_update_response.py +++ b/src/gradientai/types/knowledge_base_update_response.py @@ -10,4 +10,3 @@ class KnowledgeBaseUpdateResponse(BaseModel): knowledge_base: Optional[APIKnowledgeBase] = None - """Knowledgebase Description""" diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py b/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py index a1c23e09..1dcc9639 100644 --- a/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py +++ b/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py @@ -9,10 +9,7 @@ class APIFileUploadDataSource(BaseModel): original_file_name: Optional[str] = None - """The original file name""" size_in_bytes: Optional[str] = None - """The size of the file in bytes""" stored_object_key: Optional[str] = None - """The object key the file was stored as""" diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py b/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py index 562f8a34..37221059 100644 --- a/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py +++ b/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py @@ -9,10 +9,7 @@ class APIFileUploadDataSourceParam(TypedDict, total=False): original_file_name: str - """The original file name""" size_in_bytes: str - """The size of the file in bytes""" stored_object_key: str - """The object key the file was stored as""" diff --git a/src/gradientai/types/knowledge_bases/api_indexed_data_source.py b/src/gradientai/types/knowledge_bases/api_indexed_data_source.py index 151b29de..2449e9fd 100644 --- a/src/gradientai/types/knowledge_bases/api_indexed_data_source.py +++ b/src/gradientai/types/knowledge_bases/api_indexed_data_source.py @@ -11,34 +11,24 @@ class APIIndexedDataSource(BaseModel): completed_at: 
Optional[datetime] = None - """Timestamp when data source completed indexing""" data_source_uuid: Optional[str] = None - """Uuid of the indexed data source""" error_details: Optional[str] = None - """A detailed error description""" error_msg: Optional[str] = None - """A string code provinding a hint which part of the system experienced an error""" failed_item_count: Optional[str] = None - """Total count of files that have failed""" indexed_file_count: Optional[str] = None - """Total count of files that have been indexed""" indexed_item_count: Optional[str] = None - """Total count of files that have been indexed""" removed_item_count: Optional[str] = None - """Total count of files that have been removed""" skipped_item_count: Optional[str] = None - """Total count of files that have been skipped""" started_at: Optional[datetime] = None - """Timestamp when data source started indexing""" status: Optional[ Literal[ @@ -52,10 +42,7 @@ class APIIndexedDataSource(BaseModel): ] = None total_bytes: Optional[str] = None - """Total size of files in data source in bytes""" total_bytes_indexed: Optional[str] = None - """Total size of files in data source in bytes that have been indexed""" total_file_count: Optional[str] = None - """Total file count in the data source""" diff --git a/src/gradientai/types/knowledge_bases/api_indexing_job.py b/src/gradientai/types/knowledge_bases/api_indexing_job.py index 240fd709..573a7c4e 100644 --- a/src/gradientai/types/knowledge_bases/api_indexing_job.py +++ b/src/gradientai/types/knowledge_bases/api_indexing_job.py @@ -11,17 +11,14 @@ class APIIndexingJob(BaseModel): completed_datasources: Optional[int] = None - """Number of datasources indexed completed""" created_at: Optional[datetime] = None - """Creation date / time""" data_source_uuids: Optional[List[str]] = None finished_at: Optional[datetime] = None knowledge_base_uuid: Optional[str] = None - """Knowledge base id""" phase: Optional[ Literal[ @@ -50,13 +47,9 @@ class 
APIIndexingJob(BaseModel): ] = None tokens: Optional[int] = None - """Number of tokens""" total_datasources: Optional[int] = None - """Number of datasources being indexed""" updated_at: Optional[datetime] = None - """Last modified""" uuid: Optional[str] = None - """Unique id""" diff --git a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py index a4d695d2..202e4202 100644 --- a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py +++ b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py @@ -15,46 +15,34 @@ class AwsDataSource(BaseModel): bucket_name: Optional[str] = None - """Spaces bucket name""" item_path: Optional[str] = None region: Optional[str] = None - """Region of bucket""" class APIKnowledgeBaseDataSource(BaseModel): aws_data_source: Optional[AwsDataSource] = None - """AWS S3 Data Source for Display""" bucket_name: Optional[str] = None - """Name of storage bucket - Deprecated, moved to data_source_details""" created_at: Optional[datetime] = None - """Creation date / time""" file_upload_data_source: Optional[APIFileUploadDataSource] = None """File to upload as data source for knowledge base.""" item_path: Optional[str] = None - """Path of folder or object in bucket - Deprecated, moved to data_source_details""" last_datasource_indexing_job: Optional[APIIndexedDataSource] = None last_indexing_job: Optional[APIIndexingJob] = None - """IndexingJob description""" region: Optional[str] = None - """Region code - Deprecated, moved to data_source_details""" spaces_data_source: Optional[APISpacesDataSource] = None - """Spaces Bucket Data Source""" updated_at: Optional[datetime] = None - """Last modified""" uuid: Optional[str] = None - """Unique id of knowledge base""" web_crawler_data_source: Optional[APIWebCrawlerDataSource] = None - """WebCrawlerDataSource""" diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source.py 
b/src/gradientai/types/knowledge_bases/api_spaces_data_source.py index 02aa479a..f3a0421a 100644 --- a/src/gradientai/types/knowledge_bases/api_spaces_data_source.py +++ b/src/gradientai/types/knowledge_bases/api_spaces_data_source.py @@ -9,9 +9,7 @@ class APISpacesDataSource(BaseModel): bucket_name: Optional[str] = None - """Spaces bucket name""" item_path: Optional[str] = None region: Optional[str] = None - """Region of bucket""" diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py b/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py index 5eaeb0ad..b7f2f657 100644 --- a/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py +++ b/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py @@ -9,9 +9,7 @@ class APISpacesDataSourceParam(TypedDict, total=False): bucket_name: str - """Spaces bucket name""" item_path: str region: str - """Region of bucket""" diff --git a/src/gradientai/types/knowledge_bases/aws_data_source_param.py b/src/gradientai/types/knowledge_bases/aws_data_source_param.py index 912e3e29..93d49228 100644 --- a/src/gradientai/types/knowledge_bases/aws_data_source_param.py +++ b/src/gradientai/types/knowledge_bases/aws_data_source_param.py @@ -9,15 +9,11 @@ class AwsDataSourceParam(TypedDict, total=False): bucket_name: str - """Spaces bucket name""" item_path: str key_id: str - """The AWS Key ID""" region: str - """Region of bucket""" secret_key: str - """The AWS Secret Key""" diff --git a/src/gradientai/types/knowledge_bases/data_source_create_params.py b/src/gradientai/types/knowledge_bases/data_source_create_params.py index ac3aa93c..22bd76e7 100644 --- a/src/gradientai/types/knowledge_bases/data_source_create_params.py +++ b/src/gradientai/types/knowledge_bases/data_source_create_params.py @@ -14,13 +14,9 @@ class DataSourceCreateParams(TypedDict, total=False): aws_data_source: AwsDataSourceParam - """AWS S3 Data Source""" body_knowledge_base_uuid: Annotated[str, 
PropertyInfo(alias="knowledge_base_uuid")] - """Knowledge base id""" spaces_data_source: APISpacesDataSourceParam - """Spaces Bucket Data Source""" web_crawler_data_source: APIWebCrawlerDataSourceParam - """WebCrawlerDataSource""" diff --git a/src/gradientai/types/knowledge_bases/data_source_create_response.py b/src/gradientai/types/knowledge_bases/data_source_create_response.py index 76ec88e2..1035d3f4 100644 --- a/src/gradientai/types/knowledge_bases/data_source_create_response.py +++ b/src/gradientai/types/knowledge_bases/data_source_create_response.py @@ -10,4 +10,3 @@ class DataSourceCreateResponse(BaseModel): knowledge_base_data_source: Optional[APIKnowledgeBaseDataSource] = None - """Data Source configuration for Knowledge Bases""" diff --git a/src/gradientai/types/knowledge_bases/data_source_delete_response.py b/src/gradientai/types/knowledge_bases/data_source_delete_response.py index eaad72ff..53954d7f 100644 --- a/src/gradientai/types/knowledge_bases/data_source_delete_response.py +++ b/src/gradientai/types/knowledge_bases/data_source_delete_response.py @@ -9,7 +9,5 @@ class DataSourceDeleteResponse(BaseModel): data_source_uuid: Optional[str] = None - """Data source id""" knowledge_base_uuid: Optional[str] = None - """Knowledge base id""" diff --git a/src/gradientai/types/knowledge_bases/data_source_list_params.py b/src/gradientai/types/knowledge_bases/data_source_list_params.py index 089eb291..e3ed5e3c 100644 --- a/src/gradientai/types/knowledge_bases/data_source_list_params.py +++ b/src/gradientai/types/knowledge_bases/data_source_list_params.py @@ -9,7 +9,7 @@ class DataSourceListParams(TypedDict, total=False): page: int - """Page number.""" + """page number.""" per_page: int - """Items per page.""" + """items per page.""" diff --git a/src/gradientai/types/knowledge_bases/data_source_list_response.py b/src/gradientai/types/knowledge_bases/data_source_list_response.py index f05a49bc..2e5fc517 100644 --- 
a/src/gradientai/types/knowledge_bases/data_source_list_response.py +++ b/src/gradientai/types/knowledge_bases/data_source_list_response.py @@ -12,10 +12,7 @@ class DataSourceListResponse(BaseModel): knowledge_base_data_sources: Optional[List[APIKnowledgeBaseDataSource]] = None - """The data sources""" links: Optional[APILinks] = None - """Links to other pages""" meta: Optional[APIMeta] = None - """Meta information about the data set""" diff --git a/src/gradientai/types/knowledge_bases/indexing_job_create_params.py b/src/gradientai/types/knowledge_bases/indexing_job_create_params.py index d92c5790..04838472 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_create_params.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_create_params.py @@ -10,10 +10,5 @@ class IndexingJobCreateParams(TypedDict, total=False): data_source_uuids: List[str] - """ - List of data source ids to index, if none are provided, all data sources will be - indexed - """ knowledge_base_uuid: str - """Knowledge base id""" diff --git a/src/gradientai/types/knowledge_bases/indexing_job_create_response.py b/src/gradientai/types/knowledge_bases/indexing_job_create_response.py index 685f40ef..835ec60d 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_create_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_create_response.py @@ -10,4 +10,3 @@ class IndexingJobCreateResponse(BaseModel): job: Optional[APIIndexingJob] = None - """IndexingJob description""" diff --git a/src/gradientai/types/knowledge_bases/indexing_job_list_params.py b/src/gradientai/types/knowledge_bases/indexing_job_list_params.py index c9ac560e..90206aba 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_list_params.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_list_params.py @@ -9,7 +9,7 @@ class IndexingJobListParams(TypedDict, total=False): page: int - """Page number.""" + """page number.""" per_page: int - """Items per page.""" + """items per page.""" diff 
--git a/src/gradientai/types/knowledge_bases/indexing_job_list_response.py b/src/gradientai/types/knowledge_bases/indexing_job_list_response.py index 371f51bb..deea4562 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_list_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_list_response.py @@ -12,10 +12,7 @@ class IndexingJobListResponse(BaseModel): jobs: Optional[List[APIIndexingJob]] = None - """The indexing jobs""" links: Optional[APILinks] = None - """Links to other pages""" meta: Optional[APIMeta] = None - """Meta information about the data set""" diff --git a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py index 2d6be855..6034bdf1 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py @@ -10,4 +10,3 @@ class IndexingJobRetrieveResponse(BaseModel): job: Optional[APIIndexingJob] = None - """IndexingJob description""" diff --git a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py index 9fd41764..ae4b394f 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py @@ -10,4 +10,3 @@ class IndexingJobUpdateCancelResponse(BaseModel): job: Optional[APIIndexingJob] = None - """IndexingJob description""" diff --git a/src/gradientai/types/lb_firewall.py b/src/gradientai/types/lb_firewall.py deleted file mode 100644 index b02efa3e..00000000 --- a/src/gradientai/types/lb_firewall.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from .._models import BaseModel - -__all__ = ["LbFirewall"] - - -class LbFirewall(BaseModel): - allow: Optional[List[str]] = None - """ - the rules for allowing traffic to the load balancer (in the form 'ip:1.2.3.4' or - 'cidr:1.2.0.0/16') - """ - - deny: Optional[List[str]] = None - """ - the rules for denying traffic to the load balancer (in the form 'ip:1.2.3.4' or - 'cidr:1.2.0.0/16') - """ diff --git a/src/gradientai/types/lb_firewall_param.py b/src/gradientai/types/lb_firewall_param.py deleted file mode 100644 index 6f1dcf10..00000000 --- a/src/gradientai/types/lb_firewall_param.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List -from typing_extensions import TypedDict - -__all__ = ["LbFirewallParam"] - - -class LbFirewallParam(TypedDict, total=False): - allow: List[str] - """ - the rules for allowing traffic to the load balancer (in the form 'ip:1.2.3.4' or - 'cidr:1.2.0.0/16') - """ - - deny: List[str] - """ - the rules for denying traffic to the load balancer (in the form 'ip:1.2.3.4' or - 'cidr:1.2.0.0/16') - """ diff --git a/src/gradientai/types/load_balancer.py b/src/gradientai/types/load_balancer.py deleted file mode 100644 index 9d63222b..00000000 --- a/src/gradientai/types/load_balancer.py +++ /dev/null @@ -1,185 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from .domains import Domains -from .._models import BaseModel -from .lb_firewall import LbFirewall -from .glb_settings import GlbSettings -from .health_check import HealthCheck -from .shared.region import Region -from .forwarding_rule import ForwardingRule -from .sticky_sessions import StickySessions - -__all__ = ["LoadBalancer"] - - -class LoadBalancer(BaseModel): - forwarding_rules: List[ForwardingRule] - """An array of objects specifying the forwarding rules for a load balancer.""" - - id: Optional[str] = None - """A unique ID that can be used to identify and reference a load balancer.""" - - algorithm: Optional[Literal["round_robin", "least_connections"]] = None - """This field has been deprecated. - - You can no longer specify an algorithm for load balancers. - """ - - created_at: Optional[datetime] = None - """ - A time value given in ISO8601 combined date and time format that represents when - the load balancer was created. - """ - - disable_lets_encrypt_dns_records: Optional[bool] = None - """ - A boolean value indicating whether to disable automatic DNS record creation for - Let's Encrypt certificates that are added to the load balancer. - """ - - domains: Optional[List[Domains]] = None - """ - An array of objects specifying the domain configurations for a Global load - balancer. - """ - - droplet_ids: Optional[List[int]] = None - """An array containing the IDs of the Droplets assigned to the load balancer.""" - - enable_backend_keepalive: Optional[bool] = None - """ - A boolean value indicating whether HTTP keepalive connections are maintained to - target Droplets. - """ - - enable_proxy_protocol: Optional[bool] = None - """A boolean value indicating whether PROXY Protocol is in use.""" - - firewall: Optional[LbFirewall] = None - """ - An object specifying allow and deny rules to control traffic to the load - balancer. 
- """ - - glb_settings: Optional[GlbSettings] = None - """An object specifying forwarding configurations for a Global load balancer.""" - - health_check: Optional[HealthCheck] = None - """An object specifying health check settings for the load balancer.""" - - http_idle_timeout_seconds: Optional[int] = None - """ - An integer value which configures the idle timeout for HTTP requests to the - target droplets. - """ - - ip: Optional[str] = None - """An attribute containing the public-facing IP address of the load balancer.""" - - ipv6: Optional[str] = None - """An attribute containing the public-facing IPv6 address of the load balancer.""" - - name: Optional[str] = None - """A human-readable name for a load balancer instance.""" - - network: Optional[Literal["EXTERNAL", "INTERNAL"]] = None - """A string indicating whether the load balancer should be external or internal. - - Internal load balancers have no public IPs and are only accessible to resources - on the same VPC network. This property cannot be updated after creating the load - balancer. - """ - - network_stack: Optional[Literal["IPV4", "DUALSTACK"]] = None - """ - A string indicating whether the load balancer will support IPv4 or both IPv4 and - IPv6 networking. This property cannot be updated after creating the load - balancer. - """ - - project_id: Optional[str] = None - """The ID of the project that the load balancer is associated with. - - If no ID is provided at creation, the load balancer associates with the user's - default project. If an invalid project ID is provided, the load balancer will - not be created. - """ - - redirect_http_to_https: Optional[bool] = None - """ - A boolean value indicating whether HTTP requests to the load balancer on port 80 - will be redirected to HTTPS on port 443. - """ - - region: Optional[Region] = None - """The region where the load balancer instance is located. - - When setting a region, the value should be the slug identifier for the region. 
- When you query a load balancer, an entire region object will be returned. - """ - - size: Optional[Literal["lb-small", "lb-medium", "lb-large"]] = None - """ - This field has been replaced by the `size_unit` field for all regions except in - AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load - balancer having a set number of nodes. - - - `lb-small` = 1 node - - `lb-medium` = 3 nodes - - `lb-large` = 6 nodes - - You can resize load balancers after creation up to once per hour. You cannot - resize a load balancer within the first hour of its creation. - """ - - size_unit: Optional[int] = None - """How many nodes the load balancer contains. - - Each additional node increases the load balancer's ability to manage more - connections. Load balancers can be scaled up or down, and you can change the - number of nodes after creation up to once per hour. This field is currently not - available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load - balancers that reside in these regions. - """ - - status: Optional[Literal["new", "active", "errored"]] = None - """A status string indicating the current state of the load balancer. - - This can be `new`, `active`, or `errored`. - """ - - sticky_sessions: Optional[StickySessions] = None - """An object specifying sticky sessions settings for the load balancer.""" - - tag: Optional[str] = None - """ - The name of a Droplet tag corresponding to Droplets assigned to the load - balancer. - """ - - target_load_balancer_ids: Optional[List[str]] = None - """ - An array containing the UUIDs of the Regional load balancers to be used as - target backends for a Global load balancer. - """ - - tls_cipher_policy: Optional[Literal["DEFAULT", "STRONG"]] = None - """ - A string indicating the policy for the TLS cipher suites used by the load - balancer. The possible values are `DEFAULT` or `STRONG`. The default value is - `DEFAULT`. 
- """ - - type: Optional[Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"]] = None - """ - A string indicating whether the load balancer should be a standard regional HTTP - load balancer, a regional network load balancer that routes traffic at the - TCP/UDP transport layer, or a global load balancer. - """ - - vpc_uuid: Optional[str] = None - """A string specifying the UUID of the VPC to which the load balancer is assigned.""" diff --git a/src/gradientai/types/load_balancer_create_params.py b/src/gradientai/types/load_balancer_create_params.py deleted file mode 100644 index a87d9148..00000000 --- a/src/gradientai/types/load_balancer_create_params.py +++ /dev/null @@ -1,335 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union, Iterable -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from .domains_param import DomainsParam -from .lb_firewall_param import LbFirewallParam -from .glb_settings_param import GlbSettingsParam -from .health_check_param import HealthCheckParam -from .forwarding_rule_param import ForwardingRuleParam -from .sticky_sessions_param import StickySessionsParam - -__all__ = ["LoadBalancerCreateParams", "AssignDropletsByID", "AssignDropletsByTag"] - - -class AssignDropletsByID(TypedDict, total=False): - forwarding_rules: Required[Iterable[ForwardingRuleParam]] - """An array of objects specifying the forwarding rules for a load balancer.""" - - algorithm: Literal["round_robin", "least_connections"] - """This field has been deprecated. - - You can no longer specify an algorithm for load balancers. - """ - - disable_lets_encrypt_dns_records: bool - """ - A boolean value indicating whether to disable automatic DNS record creation for - Let's Encrypt certificates that are added to the load balancer. 
- """ - - domains: Iterable[DomainsParam] - """ - An array of objects specifying the domain configurations for a Global load - balancer. - """ - - droplet_ids: Iterable[int] - """An array containing the IDs of the Droplets assigned to the load balancer.""" - - enable_backend_keepalive: bool - """ - A boolean value indicating whether HTTP keepalive connections are maintained to - target Droplets. - """ - - enable_proxy_protocol: bool - """A boolean value indicating whether PROXY Protocol is in use.""" - - firewall: LbFirewallParam - """ - An object specifying allow and deny rules to control traffic to the load - balancer. - """ - - glb_settings: GlbSettingsParam - """An object specifying forwarding configurations for a Global load balancer.""" - - health_check: HealthCheckParam - """An object specifying health check settings for the load balancer.""" - - http_idle_timeout_seconds: int - """ - An integer value which configures the idle timeout for HTTP requests to the - target droplets. - """ - - name: str - """A human-readable name for a load balancer instance.""" - - network: Literal["EXTERNAL", "INTERNAL"] - """A string indicating whether the load balancer should be external or internal. - - Internal load balancers have no public IPs and are only accessible to resources - on the same VPC network. This property cannot be updated after creating the load - balancer. - """ - - network_stack: Literal["IPV4", "DUALSTACK"] - """ - A string indicating whether the load balancer will support IPv4 or both IPv4 and - IPv6 networking. This property cannot be updated after creating the load - balancer. - """ - - project_id: str - """The ID of the project that the load balancer is associated with. - - If no ID is provided at creation, the load balancer associates with the user's - default project. If an invalid project ID is provided, the load balancer will - not be created. 
- """ - - redirect_http_to_https: bool - """ - A boolean value indicating whether HTTP requests to the load balancer on port 80 - will be redirected to HTTPS on port 443. - """ - - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - """ - The slug identifier for the region where the resource will initially be - available. - """ - - size: Literal["lb-small", "lb-medium", "lb-large"] - """ - This field has been replaced by the `size_unit` field for all regions except in - AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load - balancer having a set number of nodes. - - - `lb-small` = 1 node - - `lb-medium` = 3 nodes - - `lb-large` = 6 nodes - - You can resize load balancers after creation up to once per hour. You cannot - resize a load balancer within the first hour of its creation. - """ - - size_unit: int - """How many nodes the load balancer contains. - - Each additional node increases the load balancer's ability to manage more - connections. Load balancers can be scaled up or down, and you can change the - number of nodes after creation up to once per hour. This field is currently not - available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load - balancers that reside in these regions. - """ - - sticky_sessions: StickySessionsParam - """An object specifying sticky sessions settings for the load balancer.""" - - target_load_balancer_ids: List[str] - """ - An array containing the UUIDs of the Regional load balancers to be used as - target backends for a Global load balancer. - """ - - tls_cipher_policy: Literal["DEFAULT", "STRONG"] - """ - A string indicating the policy for the TLS cipher suites used by the load - balancer. The possible values are `DEFAULT` or `STRONG`. The default value is - `DEFAULT`. 
- """ - - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] - """ - A string indicating whether the load balancer should be a standard regional HTTP - load balancer, a regional network load balancer that routes traffic at the - TCP/UDP transport layer, or a global load balancer. - """ - - vpc_uuid: str - """A string specifying the UUID of the VPC to which the load balancer is assigned.""" - - -class AssignDropletsByTag(TypedDict, total=False): - forwarding_rules: Required[Iterable[ForwardingRuleParam]] - """An array of objects specifying the forwarding rules for a load balancer.""" - - algorithm: Literal["round_robin", "least_connections"] - """This field has been deprecated. - - You can no longer specify an algorithm for load balancers. - """ - - disable_lets_encrypt_dns_records: bool - """ - A boolean value indicating whether to disable automatic DNS record creation for - Let's Encrypt certificates that are added to the load balancer. - """ - - domains: Iterable[DomainsParam] - """ - An array of objects specifying the domain configurations for a Global load - balancer. - """ - - enable_backend_keepalive: bool - """ - A boolean value indicating whether HTTP keepalive connections are maintained to - target Droplets. - """ - - enable_proxy_protocol: bool - """A boolean value indicating whether PROXY Protocol is in use.""" - - firewall: LbFirewallParam - """ - An object specifying allow and deny rules to control traffic to the load - balancer. - """ - - glb_settings: GlbSettingsParam - """An object specifying forwarding configurations for a Global load balancer.""" - - health_check: HealthCheckParam - """An object specifying health check settings for the load balancer.""" - - http_idle_timeout_seconds: int - """ - An integer value which configures the idle timeout for HTTP requests to the - target droplets. 
- """ - - name: str - """A human-readable name for a load balancer instance.""" - - network: Literal["EXTERNAL", "INTERNAL"] - """A string indicating whether the load balancer should be external or internal. - - Internal load balancers have no public IPs and are only accessible to resources - on the same VPC network. This property cannot be updated after creating the load - balancer. - """ - - network_stack: Literal["IPV4", "DUALSTACK"] - """ - A string indicating whether the load balancer will support IPv4 or both IPv4 and - IPv6 networking. This property cannot be updated after creating the load - balancer. - """ - - project_id: str - """The ID of the project that the load balancer is associated with. - - If no ID is provided at creation, the load balancer associates with the user's - default project. If an invalid project ID is provided, the load balancer will - not be created. - """ - - redirect_http_to_https: bool - """ - A boolean value indicating whether HTTP requests to the load balancer on port 80 - will be redirected to HTTPS on port 443. - """ - - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - """ - The slug identifier for the region where the resource will initially be - available. - """ - - size: Literal["lb-small", "lb-medium", "lb-large"] - """ - This field has been replaced by the `size_unit` field for all regions except in - AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load - balancer having a set number of nodes. - - - `lb-small` = 1 node - - `lb-medium` = 3 nodes - - `lb-large` = 6 nodes - - You can resize load balancers after creation up to once per hour. You cannot - resize a load balancer within the first hour of its creation. - """ - - size_unit: int - """How many nodes the load balancer contains. - - Each additional node increases the load balancer's ability to manage more - connections. 
Load balancers can be scaled up or down, and you can change the - number of nodes after creation up to once per hour. This field is currently not - available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load - balancers that reside in these regions. - """ - - sticky_sessions: StickySessionsParam - """An object specifying sticky sessions settings for the load balancer.""" - - tag: str - """ - The name of a Droplet tag corresponding to Droplets assigned to the load - balancer. - """ - - target_load_balancer_ids: List[str] - """ - An array containing the UUIDs of the Regional load balancers to be used as - target backends for a Global load balancer. - """ - - tls_cipher_policy: Literal["DEFAULT", "STRONG"] - """ - A string indicating the policy for the TLS cipher suites used by the load - balancer. The possible values are `DEFAULT` or `STRONG`. The default value is - `DEFAULT`. - """ - - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] - """ - A string indicating whether the load balancer should be a standard regional HTTP - load balancer, a regional network load balancer that routes traffic at the - TCP/UDP transport layer, or a global load balancer. - """ - - vpc_uuid: str - """A string specifying the UUID of the VPC to which the load balancer is assigned.""" - - -LoadBalancerCreateParams: TypeAlias = Union[AssignDropletsByID, AssignDropletsByTag] diff --git a/src/gradientai/types/load_balancer_create_response.py b/src/gradientai/types/load_balancer_create_response.py deleted file mode 100644 index 8d90c217..00000000 --- a/src/gradientai/types/load_balancer_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from .._models import BaseModel -from .load_balancer import LoadBalancer - -__all__ = ["LoadBalancerCreateResponse"] - - -class LoadBalancerCreateResponse(BaseModel): - load_balancer: Optional[LoadBalancer] = None diff --git a/src/gradientai/types/load_balancer_list_params.py b/src/gradientai/types/load_balancer_list_params.py deleted file mode 100644 index d0daff3f..00000000 --- a/src/gradientai/types/load_balancer_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["LoadBalancerListParams"] - - -class LoadBalancerListParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" diff --git a/src/gradientai/types/load_balancer_list_response.py b/src/gradientai/types/load_balancer_list_response.py deleted file mode 100644 index 64ec8e91..00000000 --- a/src/gradientai/types/load_balancer_list_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from .._models import BaseModel -from .load_balancer import LoadBalancer -from .shared.page_links import PageLinks -from .shared.meta_properties import MetaProperties - -__all__ = ["LoadBalancerListResponse"] - - -class LoadBalancerListResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - links: Optional[PageLinks] = None - - load_balancers: Optional[List[LoadBalancer]] = None diff --git a/src/gradientai/types/load_balancer_retrieve_response.py b/src/gradientai/types/load_balancer_retrieve_response.py deleted file mode 100644 index f4df6ae3..00000000 --- a/src/gradientai/types/load_balancer_retrieve_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel -from .load_balancer import LoadBalancer - -__all__ = ["LoadBalancerRetrieveResponse"] - - -class LoadBalancerRetrieveResponse(BaseModel): - load_balancer: Optional[LoadBalancer] = None diff --git a/src/gradientai/types/load_balancer_update_params.py b/src/gradientai/types/load_balancer_update_params.py deleted file mode 100644 index 9a1906cb..00000000 --- a/src/gradientai/types/load_balancer_update_params.py +++ /dev/null @@ -1,335 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import List, Union, Iterable -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from .domains_param import DomainsParam -from .lb_firewall_param import LbFirewallParam -from .glb_settings_param import GlbSettingsParam -from .health_check_param import HealthCheckParam -from .forwarding_rule_param import ForwardingRuleParam -from .sticky_sessions_param import StickySessionsParam - -__all__ = ["LoadBalancerUpdateParams", "AssignDropletsByID", "AssignDropletsByTag"] - - -class AssignDropletsByID(TypedDict, total=False): - forwarding_rules: Required[Iterable[ForwardingRuleParam]] - """An array of objects specifying the forwarding rules for a load balancer.""" - - algorithm: Literal["round_robin", "least_connections"] - """This field has been deprecated. - - You can no longer specify an algorithm for load balancers. - """ - - disable_lets_encrypt_dns_records: bool - """ - A boolean value indicating whether to disable automatic DNS record creation for - Let's Encrypt certificates that are added to the load balancer. - """ - - domains: Iterable[DomainsParam] - """ - An array of objects specifying the domain configurations for a Global load - balancer. - """ - - droplet_ids: Iterable[int] - """An array containing the IDs of the Droplets assigned to the load balancer.""" - - enable_backend_keepalive: bool - """ - A boolean value indicating whether HTTP keepalive connections are maintained to - target Droplets. - """ - - enable_proxy_protocol: bool - """A boolean value indicating whether PROXY Protocol is in use.""" - - firewall: LbFirewallParam - """ - An object specifying allow and deny rules to control traffic to the load - balancer. 
- """ - - glb_settings: GlbSettingsParam - """An object specifying forwarding configurations for a Global load balancer.""" - - health_check: HealthCheckParam - """An object specifying health check settings for the load balancer.""" - - http_idle_timeout_seconds: int - """ - An integer value which configures the idle timeout for HTTP requests to the - target droplets. - """ - - name: str - """A human-readable name for a load balancer instance.""" - - network: Literal["EXTERNAL", "INTERNAL"] - """A string indicating whether the load balancer should be external or internal. - - Internal load balancers have no public IPs and are only accessible to resources - on the same VPC network. This property cannot be updated after creating the load - balancer. - """ - - network_stack: Literal["IPV4", "DUALSTACK"] - """ - A string indicating whether the load balancer will support IPv4 or both IPv4 and - IPv6 networking. This property cannot be updated after creating the load - balancer. - """ - - project_id: str - """The ID of the project that the load balancer is associated with. - - If no ID is provided at creation, the load balancer associates with the user's - default project. If an invalid project ID is provided, the load balancer will - not be created. - """ - - redirect_http_to_https: bool - """ - A boolean value indicating whether HTTP requests to the load balancer on port 80 - will be redirected to HTTPS on port 443. - """ - - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - """ - The slug identifier for the region where the resource will initially be - available. - """ - - size: Literal["lb-small", "lb-medium", "lb-large"] - """ - This field has been replaced by the `size_unit` field for all regions except in - AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load - balancer having a set number of nodes. 
- - - `lb-small` = 1 node - - `lb-medium` = 3 nodes - - `lb-large` = 6 nodes - - You can resize load balancers after creation up to once per hour. You cannot - resize a load balancer within the first hour of its creation. - """ - - size_unit: int - """How many nodes the load balancer contains. - - Each additional node increases the load balancer's ability to manage more - connections. Load balancers can be scaled up or down, and you can change the - number of nodes after creation up to once per hour. This field is currently not - available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load - balancers that reside in these regions. - """ - - sticky_sessions: StickySessionsParam - """An object specifying sticky sessions settings for the load balancer.""" - - target_load_balancer_ids: List[str] - """ - An array containing the UUIDs of the Regional load balancers to be used as - target backends for a Global load balancer. - """ - - tls_cipher_policy: Literal["DEFAULT", "STRONG"] - """ - A string indicating the policy for the TLS cipher suites used by the load - balancer. The possible values are `DEFAULT` or `STRONG`. The default value is - `DEFAULT`. - """ - - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] - """ - A string indicating whether the load balancer should be a standard regional HTTP - load balancer, a regional network load balancer that routes traffic at the - TCP/UDP transport layer, or a global load balancer. - """ - - vpc_uuid: str - """A string specifying the UUID of the VPC to which the load balancer is assigned.""" - - -class AssignDropletsByTag(TypedDict, total=False): - forwarding_rules: Required[Iterable[ForwardingRuleParam]] - """An array of objects specifying the forwarding rules for a load balancer.""" - - algorithm: Literal["round_robin", "least_connections"] - """This field has been deprecated. - - You can no longer specify an algorithm for load balancers. 
- """ - - disable_lets_encrypt_dns_records: bool - """ - A boolean value indicating whether to disable automatic DNS record creation for - Let's Encrypt certificates that are added to the load balancer. - """ - - domains: Iterable[DomainsParam] - """ - An array of objects specifying the domain configurations for a Global load - balancer. - """ - - enable_backend_keepalive: bool - """ - A boolean value indicating whether HTTP keepalive connections are maintained to - target Droplets. - """ - - enable_proxy_protocol: bool - """A boolean value indicating whether PROXY Protocol is in use.""" - - firewall: LbFirewallParam - """ - An object specifying allow and deny rules to control traffic to the load - balancer. - """ - - glb_settings: GlbSettingsParam - """An object specifying forwarding configurations for a Global load balancer.""" - - health_check: HealthCheckParam - """An object specifying health check settings for the load balancer.""" - - http_idle_timeout_seconds: int - """ - An integer value which configures the idle timeout for HTTP requests to the - target droplets. - """ - - name: str - """A human-readable name for a load balancer instance.""" - - network: Literal["EXTERNAL", "INTERNAL"] - """A string indicating whether the load balancer should be external or internal. - - Internal load balancers have no public IPs and are only accessible to resources - on the same VPC network. This property cannot be updated after creating the load - balancer. - """ - - network_stack: Literal["IPV4", "DUALSTACK"] - """ - A string indicating whether the load balancer will support IPv4 or both IPv4 and - IPv6 networking. This property cannot be updated after creating the load - balancer. - """ - - project_id: str - """The ID of the project that the load balancer is associated with. - - If no ID is provided at creation, the load balancer associates with the user's - default project. If an invalid project ID is provided, the load balancer will - not be created. 
- """ - - redirect_http_to_https: bool - """ - A boolean value indicating whether HTTP requests to the load balancer on port 80 - will be redirected to HTTPS on port 443. - """ - - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - """ - The slug identifier for the region where the resource will initially be - available. - """ - - size: Literal["lb-small", "lb-medium", "lb-large"] - """ - This field has been replaced by the `size_unit` field for all regions except in - AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load - balancer having a set number of nodes. - - - `lb-small` = 1 node - - `lb-medium` = 3 nodes - - `lb-large` = 6 nodes - - You can resize load balancers after creation up to once per hour. You cannot - resize a load balancer within the first hour of its creation. - """ - - size_unit: int - """How many nodes the load balancer contains. - - Each additional node increases the load balancer's ability to manage more - connections. Load balancers can be scaled up or down, and you can change the - number of nodes after creation up to once per hour. This field is currently not - available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load - balancers that reside in these regions. - """ - - sticky_sessions: StickySessionsParam - """An object specifying sticky sessions settings for the load balancer.""" - - tag: str - """ - The name of a Droplet tag corresponding to Droplets assigned to the load - balancer. - """ - - target_load_balancer_ids: List[str] - """ - An array containing the UUIDs of the Regional load balancers to be used as - target backends for a Global load balancer. - """ - - tls_cipher_policy: Literal["DEFAULT", "STRONG"] - """ - A string indicating the policy for the TLS cipher suites used by the load - balancer. The possible values are `DEFAULT` or `STRONG`. 
The default value is - `DEFAULT`. - """ - - type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] - """ - A string indicating whether the load balancer should be a standard regional HTTP - load balancer, a regional network load balancer that routes traffic at the - TCP/UDP transport layer, or a global load balancer. - """ - - vpc_uuid: str - """A string specifying the UUID of the VPC to which the load balancer is assigned.""" - - -LoadBalancerUpdateParams: TypeAlias = Union[AssignDropletsByID, AssignDropletsByTag] diff --git a/src/gradientai/types/load_balancer_update_response.py b/src/gradientai/types/load_balancer_update_response.py deleted file mode 100644 index e1a58a3f..00000000 --- a/src/gradientai/types/load_balancer_update_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel -from .load_balancer import LoadBalancer - -__all__ = ["LoadBalancerUpdateResponse"] - - -class LoadBalancerUpdateResponse(BaseModel): - load_balancer: Optional[LoadBalancer] = None diff --git a/src/gradientai/types/load_balancers/__init__.py b/src/gradientai/types/load_balancers/__init__.py deleted file mode 100644 index 806a71be..00000000 --- a/src/gradientai/types/load_balancers/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from .droplet_add_params import DropletAddParams as DropletAddParams -from .droplet_remove_params import DropletRemoveParams as DropletRemoveParams -from .forwarding_rule_add_params import ForwardingRuleAddParams as ForwardingRuleAddParams -from .forwarding_rule_remove_params import ForwardingRuleRemoveParams as ForwardingRuleRemoveParams diff --git a/src/gradientai/types/load_balancers/droplet_add_params.py b/src/gradientai/types/load_balancers/droplet_add_params.py deleted file mode 100644 index ee403f5f..00000000 --- a/src/gradientai/types/load_balancers/droplet_add_params.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Iterable -from typing_extensions import Required, TypedDict - -__all__ = ["DropletAddParams"] - - -class DropletAddParams(TypedDict, total=False): - droplet_ids: Required[Iterable[int]] - """An array containing the IDs of the Droplets assigned to the load balancer.""" diff --git a/src/gradientai/types/load_balancers/droplet_remove_params.py b/src/gradientai/types/load_balancers/droplet_remove_params.py deleted file mode 100644 index d48795e9..00000000 --- a/src/gradientai/types/load_balancers/droplet_remove_params.py +++ /dev/null @@ -1,13 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Iterable -from typing_extensions import Required, TypedDict - -__all__ = ["DropletRemoveParams"] - - -class DropletRemoveParams(TypedDict, total=False): - droplet_ids: Required[Iterable[int]] - """An array containing the IDs of the Droplets assigned to the load balancer.""" diff --git a/src/gradientai/types/load_balancers/forwarding_rule_add_params.py b/src/gradientai/types/load_balancers/forwarding_rule_add_params.py deleted file mode 100644 index 2cc6a2df..00000000 --- a/src/gradientai/types/load_balancers/forwarding_rule_add_params.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Iterable -from typing_extensions import Required, TypedDict - -from ..forwarding_rule_param import ForwardingRuleParam - -__all__ = ["ForwardingRuleAddParams"] - - -class ForwardingRuleAddParams(TypedDict, total=False): - forwarding_rules: Required[Iterable[ForwardingRuleParam]] diff --git a/src/gradientai/types/load_balancers/forwarding_rule_remove_params.py b/src/gradientai/types/load_balancers/forwarding_rule_remove_params.py deleted file mode 100644 index e5209543..00000000 --- a/src/gradientai/types/load_balancers/forwarding_rule_remove_params.py +++ /dev/null @@ -1,14 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Iterable -from typing_extensions import Required, TypedDict - -from ..forwarding_rule_param import ForwardingRuleParam - -__all__ = ["ForwardingRuleRemoveParams"] - - -class ForwardingRuleRemoveParams(TypedDict, total=False): - forwarding_rules: Required[Iterable[ForwardingRuleParam]] diff --git a/src/gradientai/types/agents/evaluation_metrics/model_list_params.py b/src/gradientai/types/model_list_params.py similarity index 87% rename from src/gradientai/types/agents/evaluation_metrics/model_list_params.py rename to src/gradientai/types/model_list_params.py index a2fa066a..4abc1dc1 100644 --- a/src/gradientai/types/agents/evaluation_metrics/model_list_params.py +++ b/src/gradientai/types/model_list_params.py @@ -10,13 +10,13 @@ class ModelListParams(TypedDict, total=False): page: int - """Page number.""" + """page number.""" per_page: int - """Items per page.""" + """items per page.""" public_only: bool - """Only include models that are publicly available.""" + """only include models that are publicly available.""" usecases: List[ Literal[ @@ -29,7 +29,7 @@ class ModelListParams(TypedDict, total=False): "MODEL_USECASE_SERVERLESS", ] ] - """Include only models defined for the listed usecases. + """include only models defined for the listed usecases. - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - MODEL_USECASE_AGENT: The model maybe used in an agent diff --git a/src/gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py index 5915bdd1..47651759 100644 --- a/src/gradientai/types/model_list_response.py +++ b/src/gradientai/types/model_list_response.py @@ -1,28 +1,18 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import List -from typing_extensions import Literal +from typing import List, Optional from .._models import BaseModel +from .api_model import APIModel +from .shared.api_meta import APIMeta +from .shared.api_links import APILinks -__all__ = ["ModelListResponse", "Data"] - - -class Data(BaseModel): - id: str - """The model identifier, which can be referenced in the API endpoints.""" - - created: int - """The Unix timestamp (in seconds) when the model was created.""" - - object: Literal["model"] - """The object type, which is always "model".""" - - owned_by: str - """The organization that owns the model.""" +__all__ = ["ModelListResponse"] class ModelListResponse(BaseModel): - data: List[Data] + links: Optional[APILinks] = None + + meta: Optional[APIMeta] = None - object: Literal["list"] + models: Optional[List[APIModel]] = None diff --git a/src/gradientai/types/model_retrieve_response.py b/src/gradientai/types/model_retrieve_response.py deleted file mode 100644 index dd5de863..00000000 --- a/src/gradientai/types/model_retrieve_response.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["ModelRetrieveResponse"] - - -class ModelRetrieveResponse(BaseModel): - id: str - """The model identifier, which can be referenced in the API endpoints.""" - - created: int - """The Unix timestamp (in seconds) when the model was created.""" - - object: Literal["model"] - """The object type, which is always "model".""" - - owned_by: str - """The organization that owns the model.""" diff --git a/src/gradientai/types/models/providers/anthropic_create_params.py b/src/gradientai/types/models/providers/anthropic_create_params.py index c9fd6e85..b624121f 100644 --- a/src/gradientai/types/models/providers/anthropic_create_params.py +++ b/src/gradientai/types/models/providers/anthropic_create_params.py @@ -9,7 +9,5 @@ class AnthropicCreateParams(TypedDict, total=False): api_key: str - """Anthropic API key""" name: str - """Name of the key""" diff --git a/src/gradientai/types/models/providers/anthropic_create_response.py b/src/gradientai/types/models/providers/anthropic_create_response.py index 0fbe50bc..f0b8d2d1 100644 --- a/src/gradientai/types/models/providers/anthropic_create_response.py +++ b/src/gradientai/types/models/providers/anthropic_create_response.py @@ -10,4 +10,3 @@ class AnthropicCreateResponse(BaseModel): api_key_info: Optional[APIAnthropicAPIKeyInfo] = None - """Anthropic API Key Info""" diff --git a/src/gradientai/types/models/providers/anthropic_delete_response.py b/src/gradientai/types/models/providers/anthropic_delete_response.py index b4fdd978..a3842bbc 100644 --- a/src/gradientai/types/models/providers/anthropic_delete_response.py +++ b/src/gradientai/types/models/providers/anthropic_delete_response.py @@ -10,4 +10,3 @@ class AnthropicDeleteResponse(BaseModel): api_key_info: Optional[APIAnthropicAPIKeyInfo] = None - """Anthropic API Key Info""" diff --git a/src/gradientai/types/models/providers/anthropic_list_agents_params.py 
b/src/gradientai/types/models/providers/anthropic_list_agents_params.py index b3308b69..1a5b8229 100644 --- a/src/gradientai/types/models/providers/anthropic_list_agents_params.py +++ b/src/gradientai/types/models/providers/anthropic_list_agents_params.py @@ -9,7 +9,7 @@ class AnthropicListAgentsParams(TypedDict, total=False): page: int - """Page number.""" + """page number.""" per_page: int - """Items per page.""" + """items per page.""" diff --git a/src/gradientai/types/models/providers/anthropic_list_agents_response.py b/src/gradientai/types/models/providers/anthropic_list_agents_response.py index a1525275..6816f0db 100644 --- a/src/gradientai/types/models/providers/anthropic_list_agents_response.py +++ b/src/gradientai/types/models/providers/anthropic_list_agents_response.py @@ -15,10 +15,8 @@ class AnthropicListAgentsResponse(BaseModel): agents: Optional[List["APIAgent"]] = None links: Optional[APILinks] = None - """Links to other pages""" meta: Optional[APIMeta] = None - """Meta information about the data set""" from ...api_agent import APIAgent diff --git a/src/gradientai/types/models/providers/anthropic_list_params.py b/src/gradientai/types/models/providers/anthropic_list_params.py index ae1cca58..de8ce520 100644 --- a/src/gradientai/types/models/providers/anthropic_list_params.py +++ b/src/gradientai/types/models/providers/anthropic_list_params.py @@ -9,7 +9,7 @@ class AnthropicListParams(TypedDict, total=False): page: int - """Page number.""" + """page number.""" per_page: int - """Items per page.""" + """items per page.""" diff --git a/src/gradientai/types/models/providers/anthropic_list_response.py b/src/gradientai/types/models/providers/anthropic_list_response.py index 24d6547a..77999f5b 100644 --- a/src/gradientai/types/models/providers/anthropic_list_response.py +++ b/src/gradientai/types/models/providers/anthropic_list_response.py @@ -12,10 +12,7 @@ class AnthropicListResponse(BaseModel): api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None 
- """Api key infos""" links: Optional[APILinks] = None - """Links to other pages""" meta: Optional[APIMeta] = None - """Meta information about the data set""" diff --git a/src/gradientai/types/models/providers/anthropic_retrieve_response.py b/src/gradientai/types/models/providers/anthropic_retrieve_response.py index 61324b7d..7083b75f 100644 --- a/src/gradientai/types/models/providers/anthropic_retrieve_response.py +++ b/src/gradientai/types/models/providers/anthropic_retrieve_response.py @@ -10,4 +10,3 @@ class AnthropicRetrieveResponse(BaseModel): api_key_info: Optional[APIAnthropicAPIKeyInfo] = None - """Anthropic API Key Info""" diff --git a/src/gradientai/types/models/providers/anthropic_update_params.py b/src/gradientai/types/models/providers/anthropic_update_params.py index 865dc29c..7bb03045 100644 --- a/src/gradientai/types/models/providers/anthropic_update_params.py +++ b/src/gradientai/types/models/providers/anthropic_update_params.py @@ -11,10 +11,7 @@ class AnthropicUpdateParams(TypedDict, total=False): api_key: str - """Anthropic API key""" body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] - """API key ID""" name: str - """Name of the key""" diff --git a/src/gradientai/types/models/providers/anthropic_update_response.py b/src/gradientai/types/models/providers/anthropic_update_response.py index 3a6daaea..d3b2911b 100644 --- a/src/gradientai/types/models/providers/anthropic_update_response.py +++ b/src/gradientai/types/models/providers/anthropic_update_response.py @@ -10,4 +10,3 @@ class AnthropicUpdateResponse(BaseModel): api_key_info: Optional[APIAnthropicAPIKeyInfo] = None - """Anthropic API Key Info""" diff --git a/src/gradientai/types/models/providers/openai_create_params.py b/src/gradientai/types/models/providers/openai_create_params.py index 8ed7f571..da655d75 100644 --- a/src/gradientai/types/models/providers/openai_create_params.py +++ b/src/gradientai/types/models/providers/openai_create_params.py @@ -9,7 +9,5 @@ class 
OpenAICreateParams(TypedDict, total=False): api_key: str - """OpenAI API key""" name: str - """Name of the key""" diff --git a/src/gradientai/types/models/providers/openai_create_response.py b/src/gradientai/types/models/providers/openai_create_response.py index b2e94766..4908a91a 100644 --- a/src/gradientai/types/models/providers/openai_create_response.py +++ b/src/gradientai/types/models/providers/openai_create_response.py @@ -10,4 +10,3 @@ class OpenAICreateResponse(BaseModel): api_key_info: Optional[APIOpenAIAPIKeyInfo] = None - """OpenAI API Key Info""" diff --git a/src/gradientai/types/models/providers/openai_delete_response.py b/src/gradientai/types/models/providers/openai_delete_response.py index e59c89fe..080a251f 100644 --- a/src/gradientai/types/models/providers/openai_delete_response.py +++ b/src/gradientai/types/models/providers/openai_delete_response.py @@ -10,4 +10,3 @@ class OpenAIDeleteResponse(BaseModel): api_key_info: Optional[APIOpenAIAPIKeyInfo] = None - """OpenAI API Key Info""" diff --git a/src/gradientai/types/models/providers/openai_list_params.py b/src/gradientai/types/models/providers/openai_list_params.py index 5677eeaf..e5b86b8d 100644 --- a/src/gradientai/types/models/providers/openai_list_params.py +++ b/src/gradientai/types/models/providers/openai_list_params.py @@ -9,7 +9,7 @@ class OpenAIListParams(TypedDict, total=False): page: int - """Page number.""" + """page number.""" per_page: int - """Items per page.""" + """items per page.""" diff --git a/src/gradientai/types/models/providers/openai_list_response.py b/src/gradientai/types/models/providers/openai_list_response.py index 698cd11e..edbd9fb4 100644 --- a/src/gradientai/types/models/providers/openai_list_response.py +++ b/src/gradientai/types/models/providers/openai_list_response.py @@ -12,10 +12,7 @@ class OpenAIListResponse(BaseModel): api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None - """Api key infos""" links: Optional[APILinks] = None - """Links to other pages""" 
meta: Optional[APIMeta] = None - """Meta information about the data set""" diff --git a/src/gradientai/types/models/providers/openai_retrieve_agents_params.py b/src/gradientai/types/models/providers/openai_retrieve_agents_params.py index 2db6d7a1..8a41eaf9 100644 --- a/src/gradientai/types/models/providers/openai_retrieve_agents_params.py +++ b/src/gradientai/types/models/providers/openai_retrieve_agents_params.py @@ -9,7 +9,7 @@ class OpenAIRetrieveAgentsParams(TypedDict, total=False): page: int - """Page number.""" + """page number.""" per_page: int - """Items per page.""" + """items per page.""" diff --git a/src/gradientai/types/models/providers/openai_retrieve_agents_response.py b/src/gradientai/types/models/providers/openai_retrieve_agents_response.py index 717a56cd..b3166636 100644 --- a/src/gradientai/types/models/providers/openai_retrieve_agents_response.py +++ b/src/gradientai/types/models/providers/openai_retrieve_agents_response.py @@ -15,10 +15,8 @@ class OpenAIRetrieveAgentsResponse(BaseModel): agents: Optional[List["APIAgent"]] = None links: Optional[APILinks] = None - """Links to other pages""" meta: Optional[APIMeta] = None - """Meta information about the data set""" from ...api_agent import APIAgent diff --git a/src/gradientai/types/models/providers/openai_retrieve_response.py b/src/gradientai/types/models/providers/openai_retrieve_response.py index 0f382073..ef23b966 100644 --- a/src/gradientai/types/models/providers/openai_retrieve_response.py +++ b/src/gradientai/types/models/providers/openai_retrieve_response.py @@ -10,4 +10,3 @@ class OpenAIRetrieveResponse(BaseModel): api_key_info: Optional[APIOpenAIAPIKeyInfo] = None - """OpenAI API Key Info""" diff --git a/src/gradientai/types/models/providers/openai_update_params.py b/src/gradientai/types/models/providers/openai_update_params.py index 9b99495e..ab5d02cf 100644 --- a/src/gradientai/types/models/providers/openai_update_params.py +++ 
b/src/gradientai/types/models/providers/openai_update_params.py @@ -11,10 +11,7 @@ class OpenAIUpdateParams(TypedDict, total=False): api_key: str - """OpenAI API key""" body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] - """API key ID""" name: str - """Name of the key""" diff --git a/src/gradientai/types/models/providers/openai_update_response.py b/src/gradientai/types/models/providers/openai_update_response.py index ec7a1c94..9bb80518 100644 --- a/src/gradientai/types/models/providers/openai_update_response.py +++ b/src/gradientai/types/models/providers/openai_update_response.py @@ -10,4 +10,3 @@ class OpenAIUpdateResponse(BaseModel): api_key_info: Optional[APIOpenAIAPIKeyInfo] = None - """OpenAI API Key Info""" diff --git a/src/gradientai/types/region_list_params.py b/src/gradientai/types/region_list_params.py index 4fef37b3..1db0ad50 100644 --- a/src/gradientai/types/region_list_params.py +++ b/src/gradientai/types/region_list_params.py @@ -8,8 +8,8 @@ class RegionListParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" + serves_batch: bool + """include datacenters that are capable of running batch jobs.""" - per_page: int - """Number of items returned per page""" + serves_inference: bool + """include datacenters that serve inference.""" diff --git a/src/gradientai/types/region_list_response.py b/src/gradientai/types/region_list_response.py index f1bf4c69..0f955b36 100644 --- a/src/gradientai/types/region_list_response.py +++ b/src/gradientai/types/region_list_response.py @@ -3,17 +3,21 @@ from typing import List, Optional from .._models import BaseModel -from .shared.region import Region -from .shared.page_links import PageLinks -from .shared.meta_properties import MetaProperties -__all__ = ["RegionListResponse"] +__all__ = ["RegionListResponse", "Region"] -class RegionListResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" +class Region(BaseModel): + 
inference_url: Optional[str] = None + + region: Optional[str] = None + + serves_batch: Optional[bool] = None - regions: List[Region] + serves_inference: Optional[bool] = None - links: Optional[PageLinks] = None + stream_inference_url: Optional[str] = None + + +class RegionListResponse(BaseModel): + regions: Optional[List[Region]] = None diff --git a/src/gradientai/types/shared/__init__.py b/src/gradientai/types/shared/__init__.py index 881ec31a..9fdd7605 100644 --- a/src/gradientai/types/shared/__init__.py +++ b/src/gradientai/types/shared/__init__.py @@ -1,32 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from .size import Size as Size -from .image import Image as Image -from .action import Action as Action -from .kernel import Kernel as Kernel -from .region import Region as Region -from .droplet import Droplet as Droplet from .api_meta import APIMeta as APIMeta -from .gpu_info import GPUInfo as GPUInfo from .api_links import APILinks as APILinks -from .disk_info import DiskInfo as DiskInfo -from .snapshots import Snapshots as Snapshots -from .network_v4 import NetworkV4 as NetworkV4 -from .network_v6 import NetworkV6 as NetworkV6 -from .page_links import PageLinks as PageLinks -from .action_link import ActionLink as ActionLink -from .vpc_peering import VpcPeering as VpcPeering -from .subscription import Subscription as Subscription -from .forward_links import ForwardLinks as ForwardLinks -from .backward_links import BackwardLinks as BackwardLinks -from .repository_tag import RepositoryTag as RepositoryTag -from .meta_properties import MetaProperties as MetaProperties -from .repository_blob import RepositoryBlob as RepositoryBlob -from .completion_usage import CompletionUsage as CompletionUsage -from .garbage_collection import GarbageCollection as GarbageCollection -from .repository_manifest import RepositoryManifest as RepositoryManifest -from .firewall_rule_target import FirewallRuleTarget as FirewallRuleTarget 
from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk -from .subscription_tier_base import SubscriptionTierBase as SubscriptionTierBase -from .droplet_next_backup_window import DropletNextBackupWindow as DropletNextBackupWindow from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob diff --git a/src/gradientai/types/shared/action.py b/src/gradientai/types/shared/action.py deleted file mode 100644 index 2b9fbf4e..00000000 --- a/src/gradientai/types/shared/action.py +++ /dev/null @@ -1,51 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from datetime import datetime -from typing_extensions import Literal - -from .region import Region -from ..._models import BaseModel - -__all__ = ["Action"] - - -class Action(BaseModel): - id: Optional[int] = None - """A unique numeric ID that can be used to identify and reference an action.""" - - completed_at: Optional[datetime] = None - """ - A time value given in ISO8601 combined date and time format that represents when - the action was completed. - """ - - region: Optional[Region] = None - - region_slug: Optional[str] = None - """A human-readable string that is used as a unique identifier for each region.""" - - resource_id: Optional[int] = None - """A unique identifier for the resource that the action is associated with.""" - - resource_type: Optional[str] = None - """The type of resource that the action is associated with.""" - - started_at: Optional[datetime] = None - """ - A time value given in ISO8601 combined date and time format that represents when - the action was initiated. - """ - - status: Optional[Literal["in-progress", "completed", "errored"]] = None - """The current status of the action. - - This can be "in-progress", "completed", or "errored". - """ - - type: Optional[str] = None - """This is the type of action that the object represents. 
- - For example, this could be "transfer" to represent the state of an image - transfer action. - """ diff --git a/src/gradientai/types/shared/action_link.py b/src/gradientai/types/shared/action_link.py deleted file mode 100644 index 78aec9ff..00000000 --- a/src/gradientai/types/shared/action_link.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["ActionLink"] - - -class ActionLink(BaseModel): - id: Optional[int] = None - """A unique numeric ID that can be used to identify and reference an action.""" - - href: Optional[str] = None - """A URL that can be used to access the action.""" - - rel: Optional[str] = None - """A string specifying the type of the related action.""" diff --git a/src/gradientai/types/shared/api_links.py b/src/gradientai/types/shared/api_links.py index 24b19cfe..b37113f0 100644 --- a/src/gradientai/types/shared/api_links.py +++ b/src/gradientai/types/shared/api_links.py @@ -9,18 +9,13 @@ class Pages(BaseModel): first: Optional[str] = None - """First page""" last: Optional[str] = None - """Last page""" next: Optional[str] = None - """Next page""" previous: Optional[str] = None - """Previous page""" class APILinks(BaseModel): pages: Optional[Pages] = None - """Information about how to reach other pages""" diff --git a/src/gradientai/types/shared/api_meta.py b/src/gradientai/types/shared/api_meta.py index dc267527..9191812c 100644 --- a/src/gradientai/types/shared/api_meta.py +++ b/src/gradientai/types/shared/api_meta.py @@ -9,10 +9,7 @@ class APIMeta(BaseModel): page: Optional[int] = None - """The current page""" pages: Optional[int] = None - """Total number of pages""" total: Optional[int] = None - """Total amount of items over all pages""" diff --git a/src/gradientai/types/shared/backward_links.py b/src/gradientai/types/shared/backward_links.py deleted file mode 100644 index 502fefef..00000000 
--- a/src/gradientai/types/shared/backward_links.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["BackwardLinks"] - - -class BackwardLinks(BaseModel): - first: Optional[str] = None - """URI of the first page of the results.""" - - prev: Optional[str] = None - """URI of the previous page of the results.""" diff --git a/src/gradientai/types/shared/chat_completion_chunk.py b/src/gradientai/types/shared/chat_completion_chunk.py index 4dd587f9..4d45ef8d 100644 --- a/src/gradientai/types/shared/chat_completion_chunk.py +++ b/src/gradientai/types/shared/chat_completion_chunk.py @@ -4,43 +4,9 @@ from typing_extensions import Literal from ..._models import BaseModel -from .completion_usage import CompletionUsage from .chat_completion_token_logprob import ChatCompletionTokenLogprob -__all__ = [ - "ChatCompletionChunk", - "Choice", - "ChoiceDelta", - "ChoiceDeltaToolCall", - "ChoiceDeltaToolCallFunction", - "ChoiceLogprobs", -] - - -class ChoiceDeltaToolCallFunction(BaseModel): - arguments: Optional[str] = None - """ - The arguments to call the function with, as generated by the model in JSON - format. Note that the model does not always generate valid JSON, and may - hallucinate parameters not defined by your function schema. Validate the - arguments in your code before calling your function. - """ - - name: Optional[str] = None - """The name of the function to call.""" - - -class ChoiceDeltaToolCall(BaseModel): - index: int - - id: Optional[str] = None - """The ID of the tool call.""" - - function: Optional[ChoiceDeltaToolCallFunction] = None - """A chunk of a function that the model called.""" - - type: Optional[Literal["function"]] = None - """The type of the tool. 
Currently, only `function` is supported.""" +__all__ = ["ChatCompletionChunk", "Choice", "ChoiceDelta", "ChoiceLogprobs", "Usage"] class ChoiceDelta(BaseModel): @@ -53,8 +19,6 @@ class ChoiceDelta(BaseModel): role: Optional[Literal["developer", "user", "assistant"]] = None """The role of the author of this message.""" - tool_calls: Optional[List[ChoiceDeltaToolCall]] = None - class ChoiceLogprobs(BaseModel): content: Optional[List[ChatCompletionTokenLogprob]] = None @@ -68,12 +32,12 @@ class Choice(BaseModel): delta: ChoiceDelta """A chat completion delta generated by streamed model responses.""" - finish_reason: Optional[Literal["stop", "length", "tool_calls"]] = None + finish_reason: Optional[Literal["stop", "length"]] = None """The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, or `length` if the maximum number of tokens specified in the request - was reached, `tool_calls` if the model called a tool. + was reached """ index: int @@ -83,6 +47,17 @@ class Choice(BaseModel): """Log probability information for the choice.""" +class Usage(BaseModel): + completion_tokens: int + """Number of tokens in the generated completion.""" + + prompt_tokens: int + """Number of tokens in the prompt.""" + + total_tokens: int + """Total number of tokens used in the request (prompt + completion).""" + + class ChatCompletionChunk(BaseModel): id: str """A unique identifier for the chat completion. Each chunk has the same ID.""" @@ -106,7 +81,7 @@ class ChatCompletionChunk(BaseModel): object: Literal["chat.completion.chunk"] """The object type, which is always `chat.completion.chunk`.""" - usage: Optional[CompletionUsage] = None + usage: Optional[Usage] = None """ An optional field that will only be present when you set `stream_options: {"include_usage": true}` in your request. 
When present, it diff --git a/src/gradientai/types/shared/completion_usage.py b/src/gradientai/types/shared/completion_usage.py deleted file mode 100644 index a2012eef..00000000 --- a/src/gradientai/types/shared/completion_usage.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from ..._models import BaseModel - -__all__ = ["CompletionUsage"] - - -class CompletionUsage(BaseModel): - completion_tokens: int - """Number of tokens in the generated completion.""" - - prompt_tokens: int - """Number of tokens in the prompt.""" - - total_tokens: int - """Total number of tokens used in the request (prompt + completion).""" diff --git a/src/gradientai/types/shared/disk_info.py b/src/gradientai/types/shared/disk_info.py deleted file mode 100644 index 3c5c4911..00000000 --- a/src/gradientai/types/shared/disk_info.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["DiskInfo", "Size"] - - -class Size(BaseModel): - amount: Optional[int] = None - """The amount of space allocated to the disk.""" - - unit: Optional[str] = None - """The unit of measure for the disk size.""" - - -class DiskInfo(BaseModel): - size: Optional[Size] = None - - type: Optional[Literal["local", "scratch"]] = None - """The type of disk. - - All Droplets contain a `local` disk. Additionally, GPU Droplets can also have a - `scratch` disk for non-persistent data. - """ diff --git a/src/gradientai/types/shared/droplet.py b/src/gradientai/types/shared/droplet.py deleted file mode 100644 index 9d2bb17c..00000000 --- a/src/gradientai/types/shared/droplet.py +++ /dev/null @@ -1,143 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from .size import Size -from .image import Image -from .kernel import Kernel -from .region import Region -from .gpu_info import GPUInfo -from ..._models import BaseModel -from .disk_info import DiskInfo -from .network_v4 import NetworkV4 -from .network_v6 import NetworkV6 -from .droplet_next_backup_window import DropletNextBackupWindow - -__all__ = ["Droplet", "Networks"] - - -class Networks(BaseModel): - v4: Optional[List[NetworkV4]] = None - - v6: Optional[List[NetworkV6]] = None - - -class Droplet(BaseModel): - id: int - """A unique identifier for each Droplet instance. - - This is automatically generated upon Droplet creation. - """ - - backup_ids: List[int] - """ - An array of backup IDs of any backups that have been taken of the Droplet - instance. Droplet backups are enabled at the time of the instance creation. - Requires `image:read` scope. - """ - - created_at: datetime - """ - A time value given in ISO8601 combined date and time format that represents when - the Droplet was created. - """ - - disk: int - """The size of the Droplet's disk in gigabytes.""" - - features: List[str] - """An array of features enabled on this Droplet.""" - - image: Image - """The Droplet's image. Requires `image:read` scope.""" - - locked: bool - """ - A boolean value indicating whether the Droplet has been locked, preventing - actions by users. - """ - - memory: int - """Memory of the Droplet in megabytes.""" - - name: str - """The human-readable name set for the Droplet instance.""" - - networks: Networks - """The details of the network that are configured for the Droplet instance. - - This is an object that contains keys for IPv4 and IPv6. The value of each of - these is an array that contains objects describing an individual IP resource - allocated to the Droplet. 
These will define attributes like the IP address, - netmask, and gateway of the specific network depending on the type of network it - is. - """ - - next_backup_window: Optional[DropletNextBackupWindow] = None - """ - The details of the Droplet's backups feature, if backups are configured for the - Droplet. This object contains keys for the start and end times of the window - during which the backup will start. - """ - - region: Region - - size: Size - - size_slug: str - """The unique slug identifier for the size of this Droplet.""" - - snapshot_ids: List[int] - """ - An array of snapshot IDs of any snapshots created from the Droplet instance. - Requires `image:read` scope. - """ - - status: Literal["new", "active", "off", "archive"] - """A status string indicating the state of the Droplet instance. - - This may be "new", "active", "off", or "archive". - """ - - tags: List[str] - """An array of Tags the Droplet has been tagged with. Requires `tag:read` scope.""" - - vcpus: int - """The number of virtual CPUs.""" - - volume_ids: List[str] - """ - A flat array including the unique identifier for each Block Storage volume - attached to the Droplet. Requires `block_storage:read` scope. - """ - - disk_info: Optional[List[DiskInfo]] = None - """ - An array of objects containing information about the disks available to the - Droplet. - """ - - gpu_info: Optional[GPUInfo] = None - """ - An object containing information about the GPU capabilities of Droplets created - with this size. - """ - - kernel: Optional[Kernel] = None - """ - **Note**: All Droplets created after March 2017 use internal kernels by default. - These Droplets will have this attribute set to `null`. - - The current - [kernel](https://docs.digitalocean.com/products/droplets/how-to/kernel/) for - Droplets with externally managed kernels. This will initially be set to the - kernel of the base image when the Droplet is created. 
- """ - - vpc_uuid: Optional[str] = None - """ - A string specifying the UUID of the VPC to which the Droplet is assigned. - Requires `vpc:read` scope. - """ diff --git a/src/gradientai/types/shared/droplet_next_backup_window.py b/src/gradientai/types/shared/droplet_next_backup_window.py deleted file mode 100644 index 81d07be6..00000000 --- a/src/gradientai/types/shared/droplet_next_backup_window.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from datetime import datetime - -from ..._models import BaseModel - -__all__ = ["DropletNextBackupWindow"] - - -class DropletNextBackupWindow(BaseModel): - end: Optional[datetime] = None - """ - A time value given in ISO8601 combined date and time format specifying the end - of the Droplet's backup window. - """ - - start: Optional[datetime] = None - """ - A time value given in ISO8601 combined date and time format specifying the start - of the Droplet's backup window. - """ diff --git a/src/gradientai/types/shared/firewall_rule_target.py b/src/gradientai/types/shared/firewall_rule_target.py deleted file mode 100644 index 11f61065..00000000 --- a/src/gradientai/types/shared/firewall_rule_target.py +++ /dev/null @@ -1,41 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from ..._models import BaseModel - -__all__ = ["FirewallRuleTarget"] - - -class FirewallRuleTarget(BaseModel): - addresses: Optional[List[str]] = None - """ - An array of strings containing the IPv4 addresses, IPv6 addresses, IPv4 CIDRs, - and/or IPv6 CIDRs to which the firewall will allow traffic. - """ - - droplet_ids: Optional[List[int]] = None - """ - An array containing the IDs of the Droplets to which the firewall will allow - traffic. 
- """ - - kubernetes_ids: Optional[List[str]] = None - """ - An array containing the IDs of the Kubernetes clusters to which the firewall - will allow traffic. - """ - - load_balancer_uids: Optional[List[str]] = None - """ - An array containing the IDs of the load balancers to which the firewall will - allow traffic. - """ - - tags: Optional[List[str]] = None - """A flat array of tag names as strings to be applied to the resource. - - Tag names must exist in order to be referenced in a request. - - Requires `tag:create` and `tag:read` scopes. - """ diff --git a/src/gradientai/types/shared/forward_links.py b/src/gradientai/types/shared/forward_links.py deleted file mode 100644 index 30d46985..00000000 --- a/src/gradientai/types/shared/forward_links.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["ForwardLinks"] - - -class ForwardLinks(BaseModel): - last: Optional[str] = None - """URI of the last page of the results.""" - - next: Optional[str] = None - """URI of the next page of the results.""" diff --git a/src/gradientai/types/shared/garbage_collection.py b/src/gradientai/types/shared/garbage_collection.py deleted file mode 100644 index f1f7f4cd..00000000 --- a/src/gradientai/types/shared/garbage_collection.py +++ /dev/null @@ -1,43 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional -from datetime import datetime -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["GarbageCollection"] - - -class GarbageCollection(BaseModel): - blobs_deleted: Optional[int] = None - """The number of blobs deleted as a result of this garbage collection.""" - - created_at: Optional[datetime] = None - """The time the garbage collection was created.""" - - freed_bytes: Optional[int] = None - """The number of bytes freed as a result of this garbage collection.""" - - registry_name: Optional[str] = None - """The name of the container registry.""" - - status: Optional[ - Literal[ - "requested", - "waiting for write JWTs to expire", - "scanning manifests", - "deleting unreferenced blobs", - "cancelling", - "failed", - "succeeded", - "cancelled", - ] - ] = None - """The current status of this garbage collection.""" - - updated_at: Optional[datetime] = None - """The time the garbage collection was last updated.""" - - uuid: Optional[str] = None - """A string specifying the UUID of the garbage collection.""" diff --git a/src/gradientai/types/shared/gpu_info.py b/src/gradientai/types/shared/gpu_info.py deleted file mode 100644 index a285dd23..00000000 --- a/src/gradientai/types/shared/gpu_info.py +++ /dev/null @@ -1,25 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["GPUInfo", "Vram"] - - -class Vram(BaseModel): - amount: Optional[int] = None - """The amount of VRAM allocated to the GPU.""" - - unit: Optional[str] = None - """The unit of measure for the VRAM.""" - - -class GPUInfo(BaseModel): - count: Optional[int] = None - """The number of GPUs allocated to the Droplet.""" - - model: Optional[str] = None - """The model of the GPU.""" - - vram: Optional[Vram] = None diff --git a/src/gradientai/types/shared/image.py b/src/gradientai/types/shared/image.py deleted file mode 100644 index d8a7acde..00000000 --- a/src/gradientai/types/shared/image.py +++ /dev/null @@ -1,131 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["Image"] - - -class Image(BaseModel): - id: Optional[int] = None - """A unique number that can be used to identify and reference a specific image.""" - - created_at: Optional[datetime] = None - """ - A time value given in ISO8601 combined date and time format that represents when - the image was created. - """ - - description: Optional[str] = None - """An optional free-form text field to describe an image.""" - - distribution: Optional[ - Literal[ - "Arch Linux", - "CentOS", - "CoreOS", - "Debian", - "Fedora", - "Fedora Atomic", - "FreeBSD", - "Gentoo", - "openSUSE", - "RancherOS", - "Rocky Linux", - "Ubuntu", - "Unknown", - ] - ] = None - """The name of a custom image's distribution. - - Currently, the valid values are `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, - `Fedora`, `Fedora Atomic`, `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, - `Rocky Linux`, `Ubuntu`, and `Unknown`. Any other value will be accepted but - ignored, and `Unknown` will be used in its place. 
- """ - - error_message: Optional[str] = None - """ - A string containing information about errors that may occur when importing a - custom image. - """ - - min_disk_size: Optional[int] = None - """The minimum disk size in GB required for a Droplet to use this image.""" - - name: Optional[str] = None - """The display name that has been given to an image. - - This is what is shown in the control panel and is generally a descriptive title - for the image in question. - """ - - public: Optional[bool] = None - """ - This is a boolean value that indicates whether the image in question is public - or not. An image that is public is available to all accounts. A non-public image - is only accessible from your account. - """ - - regions: Optional[ - List[ - Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - ] - ] = None - """This attribute is an array of the regions that the image is available in. - - The regions are represented by their identifying slug values. - """ - - size_gigabytes: Optional[float] = None - """The size of the image in gigabytes.""" - - slug: Optional[str] = None - """ - A uniquely identifying string that is associated with each of the - DigitalOcean-provided public images. These can be used to reference a public - image as an alternative to the numeric id. - """ - - status: Optional[Literal["NEW", "available", "pending", "deleted", "retired"]] = None - """A status string indicating the state of a custom image. - - This may be `NEW`, `available`, `pending`, `deleted`, or `retired`. - """ - - tags: Optional[List[str]] = None - """A flat array of tag names as strings to be applied to the resource. - - Tag names may be for either existing or new tags. - - Requires `tag:create` scope. - """ - - type: Optional[Literal["base", "snapshot", "backup", "custom", "admin"]] = None - """Describes the kind of image. 
- - It may be one of `base`, `snapshot`, `backup`, `custom`, or `admin`. - Respectively, this specifies whether an image is a DigitalOcean base OS image, - user-generated Droplet snapshot, automatically created Droplet backup, - user-provided virtual machine image, or an image used for DigitalOcean managed - resources (e.g. DOKS worker nodes). - """ diff --git a/src/gradientai/types/shared/kernel.py b/src/gradientai/types/shared/kernel.py deleted file mode 100644 index 78a63427..00000000 --- a/src/gradientai/types/shared/kernel.py +++ /dev/null @@ -1,25 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["Kernel"] - - -class Kernel(BaseModel): - id: Optional[int] = None - """A unique number used to identify and reference a specific kernel.""" - - name: Optional[str] = None - """The display name of the kernel. - - This is shown in the web UI and is generally a descriptive title for the kernel - in question. - """ - - version: Optional[str] = None - """ - A standard kernel version string representing the version, patch, and release - information. - """ diff --git a/src/gradientai/types/shared/meta_properties.py b/src/gradientai/types/shared/meta_properties.py deleted file mode 100644 index a78a64d6..00000000 --- a/src/gradientai/types/shared/meta_properties.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["MetaProperties"] - - -class MetaProperties(BaseModel): - total: Optional[int] = None - """Number of objects returned by the request.""" diff --git a/src/gradientai/types/shared/network_v4.py b/src/gradientai/types/shared/network_v4.py deleted file mode 100644 index bbf8490a..00000000 --- a/src/gradientai/types/shared/network_v4.py +++ /dev/null @@ -1,26 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["NetworkV4"] - - -class NetworkV4(BaseModel): - gateway: Optional[str] = None - """The gateway of the specified IPv4 network interface. - - For private interfaces, a gateway is not provided. This is denoted by returning - `nil` as its value. - """ - - ip_address: Optional[str] = None - """The IP address of the IPv4 network interface.""" - - netmask: Optional[str] = None - """The netmask of the IPv4 network interface.""" - - type: Optional[Literal["public", "private"]] = None - """The type of the IPv4 network interface.""" diff --git a/src/gradientai/types/shared/network_v6.py b/src/gradientai/types/shared/network_v6.py deleted file mode 100644 index a3eb6b42..00000000 --- a/src/gradientai/types/shared/network_v6.py +++ /dev/null @@ -1,25 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["NetworkV6"] - - -class NetworkV6(BaseModel): - gateway: Optional[str] = None - """The gateway of the specified IPv6 network interface.""" - - ip_address: Optional[str] = None - """The IP address of the IPv6 network interface.""" - - netmask: Optional[int] = None - """The netmask of the IPv6 network interface.""" - - type: Optional[Literal["public"]] = None - """The type of the IPv6 network interface. - - **Note**: IPv6 private networking is not currently supported. - """ diff --git a/src/gradientai/types/shared/page_links.py b/src/gradientai/types/shared/page_links.py deleted file mode 100644 index bfceabef..00000000 --- a/src/gradientai/types/shared/page_links.py +++ /dev/null @@ -1,16 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Union, Optional -from typing_extensions import TypeAlias - -from ..._models import BaseModel -from .forward_links import ForwardLinks -from .backward_links import BackwardLinks - -__all__ = ["PageLinks", "Pages"] - -Pages: TypeAlias = Union[ForwardLinks, BackwardLinks, object] - - -class PageLinks(BaseModel): - pages: Optional[Pages] = None diff --git a/src/gradientai/types/shared/region.py b/src/gradientai/types/shared/region.py deleted file mode 100644 index d2fe7c51..00000000 --- a/src/gradientai/types/shared/region.py +++ /dev/null @@ -1,36 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List - -from ..._models import BaseModel - -__all__ = ["Region"] - - -class Region(BaseModel): - available: bool - """ - This is a boolean value that represents whether new Droplets can be created in - this region. - """ - - features: List[str] - """ - This attribute is set to an array which contains features available in this - region - """ - - name: str - """The display name of the region. 
- - This will be a full name that is used in the control panel and other interfaces. - """ - - sizes: List[str] - """ - This attribute is set to an array which contains the identifying slugs for the - sizes available in this region. sizes:read is required to view. - """ - - slug: str - """A human-readable string that is used as a unique identifier for each region.""" diff --git a/src/gradientai/types/shared/repository_blob.py b/src/gradientai/types/shared/repository_blob.py deleted file mode 100644 index aae5702b..00000000 --- a/src/gradientai/types/shared/repository_blob.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["RepositoryBlob"] - - -class RepositoryBlob(BaseModel): - compressed_size_bytes: Optional[int] = None - """The compressed size of the blob in bytes.""" - - digest: Optional[str] = None - """The digest of the blob""" diff --git a/src/gradientai/types/shared/repository_manifest.py b/src/gradientai/types/shared/repository_manifest.py deleted file mode 100644 index babbbea2..00000000 --- a/src/gradientai/types/shared/repository_manifest.py +++ /dev/null @@ -1,38 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional -from datetime import datetime - -from ..._models import BaseModel -from .repository_blob import RepositoryBlob - -__all__ = ["RepositoryManifest"] - - -class RepositoryManifest(BaseModel): - blobs: Optional[List[RepositoryBlob]] = None - """All blobs associated with this manifest""" - - compressed_size_bytes: Optional[int] = None - """The compressed size of the manifest in bytes.""" - - digest: Optional[str] = None - """The manifest digest""" - - registry_name: Optional[str] = None - """The name of the container registry.""" - - repository: Optional[str] = None - """The name of the repository.""" - - size_bytes: Optional[int] = None - """ - The uncompressed size of the manifest in bytes (this size is calculated - asynchronously so it may not be immediately available). - """ - - tags: Optional[List[str]] = None - """All tags associated with this manifest""" - - updated_at: Optional[datetime] = None - """The time the manifest was last updated.""" diff --git a/src/gradientai/types/shared/repository_tag.py b/src/gradientai/types/shared/repository_tag.py deleted file mode 100644 index a06ec6bb..00000000 --- a/src/gradientai/types/shared/repository_tag.py +++ /dev/null @@ -1,34 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional -from datetime import datetime - -from ..._models import BaseModel - -__all__ = ["RepositoryTag"] - - -class RepositoryTag(BaseModel): - compressed_size_bytes: Optional[int] = None - """The compressed size of the tag in bytes.""" - - manifest_digest: Optional[str] = None - """The digest of the manifest associated with the tag.""" - - registry_name: Optional[str] = None - """The name of the container registry.""" - - repository: Optional[str] = None - """The name of the repository.""" - - size_bytes: Optional[int] = None - """ - The uncompressed size of the tag in bytes (this size is calculated - asynchronously so it may not be immediately available). - """ - - tag: Optional[str] = None - """The name of the tag.""" - - updated_at: Optional[datetime] = None - """The time the tag was last updated.""" diff --git a/src/gradientai/types/shared/size.py b/src/gradientai/types/shared/size.py deleted file mode 100644 index 42b0b41f..00000000 --- a/src/gradientai/types/shared/size.py +++ /dev/null @@ -1,79 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from .gpu_info import GPUInfo -from ..._models import BaseModel -from .disk_info import DiskInfo - -__all__ = ["Size"] - - -class Size(BaseModel): - available: bool - """ - This is a boolean value that represents whether new Droplets can be created with - this size. - """ - - description: str - """A string describing the class of Droplets created from this size. - - For example: Basic, General Purpose, CPU-Optimized, Memory-Optimized, or - Storage-Optimized. - """ - - disk: int - """The amount of disk space set aside for Droplets of this size. - - The value is represented in gigabytes. - """ - - memory: int - """The amount of RAM allocated to Droplets created of this size. - - The value is represented in megabytes. 
- """ - - price_hourly: float - """This describes the price of the Droplet size as measured hourly. - - The value is measured in US dollars. - """ - - price_monthly: float - """ - This attribute describes the monthly cost of this Droplet size if the Droplet is - kept for an entire month. The value is measured in US dollars. - """ - - regions: List[str] - """ - An array containing the region slugs where this size is available for Droplet - creates. regions:read is required to view. - """ - - slug: str - """A human-readable string that is used to uniquely identify each size.""" - - transfer: float - """ - The amount of transfer bandwidth that is available for Droplets created in this - size. This only counts traffic on the public interface. The value is given in - terabytes. - """ - - vcpus: int - """The number of CPUs allocated to Droplets of this size.""" - - disk_info: Optional[List[DiskInfo]] = None - """ - An array of objects containing information about the disks available to Droplets - created with this size. - """ - - gpu_info: Optional[GPUInfo] = None - """ - An object containing information about the GPU capabilities of Droplets created - with this size. - """ diff --git a/src/gradientai/types/shared/snapshots.py b/src/gradientai/types/shared/snapshots.py deleted file mode 100644 index 940b58c8..00000000 --- a/src/gradientai/types/shared/snapshots.py +++ /dev/null @@ -1,47 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["Snapshots"] - - -class Snapshots(BaseModel): - id: str - """The unique identifier for the snapshot.""" - - created_at: datetime - """ - A time value given in ISO8601 combined date and time format that represents when - the snapshot was created. 
- """ - - min_disk_size: int - """The minimum size in GB required for a volume or Droplet to use this snapshot.""" - - name: str - """A human-readable name for the snapshot.""" - - regions: List[str] - """An array of the regions that the snapshot is available in. - - The regions are represented by their identifying slug values. - """ - - resource_id: str - """The unique identifier for the resource that the snapshot originated from.""" - - resource_type: Literal["droplet", "volume"] - """The type of resource that the snapshot originated from.""" - - size_gigabytes: float - """The billable size of the snapshot in gigabytes.""" - - tags: Optional[List[str]] = None - """An array of Tags the snapshot has been tagged with. - - Requires `tag:read` scope. - """ diff --git a/src/gradientai/types/shared/subscription.py b/src/gradientai/types/shared/subscription.py deleted file mode 100644 index 4d77a9b8..00000000 --- a/src/gradientai/types/shared/subscription.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from datetime import datetime - -from ..._models import BaseModel -from .subscription_tier_base import SubscriptionTierBase - -__all__ = ["Subscription"] - - -class Subscription(BaseModel): - created_at: Optional[datetime] = None - """The time at which the subscription was created.""" - - tier: Optional[SubscriptionTierBase] = None - - updated_at: Optional[datetime] = None - """The time at which the subscription was last updated.""" diff --git a/src/gradientai/types/shared/subscription_tier_base.py b/src/gradientai/types/shared/subscription_tier_base.py deleted file mode 100644 index 65e1a316..00000000 --- a/src/gradientai/types/shared/subscription_tier_base.py +++ /dev/null @@ -1,44 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..._models import BaseModel - -__all__ = ["SubscriptionTierBase"] - - -class SubscriptionTierBase(BaseModel): - allow_storage_overage: Optional[bool] = None - """ - A boolean indicating whether the subscription tier supports additional storage - above what is included in the base plan at an additional cost per GiB used. - """ - - included_bandwidth_bytes: Optional[int] = None - """ - The amount of outbound data transfer included in the subscription tier in bytes. - """ - - included_repositories: Optional[int] = None - """The number of repositories included in the subscription tier. - - `0` indicates that the subscription tier includes unlimited repositories. - """ - - included_storage_bytes: Optional[int] = None - """The amount of storage included in the subscription tier in bytes.""" - - monthly_price_in_cents: Optional[int] = None - """The monthly cost of the subscription tier in cents.""" - - name: Optional[str] = None - """The name of the subscription tier.""" - - slug: Optional[str] = None - """The slug identifier of the subscription tier.""" - - storage_overage_price_in_cents: Optional[int] = None - """ - The price paid in cents per GiB for additional storage beyond what is included - in the subscription plan. - """ diff --git a/src/gradientai/types/shared/vpc_peering.py b/src/gradientai/types/shared/vpc_peering.py deleted file mode 100644 index ef674e23..00000000 --- a/src/gradientai/types/shared/vpc_peering.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional -from datetime import datetime -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["VpcPeering"] - - -class VpcPeering(BaseModel): - id: Optional[str] = None - """A unique ID that can be used to identify and reference the VPC peering.""" - - created_at: Optional[datetime] = None - """A time value given in ISO8601 combined date and time format.""" - - name: Optional[str] = None - """The name of the VPC peering. - - Must be unique within the team and may only contain alphanumeric characters and - dashes. - """ - - status: Optional[Literal["PROVISIONING", "ACTIVE", "DELETING"]] = None - """The current status of the VPC peering.""" - - vpc_ids: Optional[List[str]] = None - """An array of the two peered VPCs IDs.""" diff --git a/src/gradientai/types/shared_params/__init__.py b/src/gradientai/types/shared_params/__init__.py deleted file mode 100644 index ccdec8fd..00000000 --- a/src/gradientai/types/shared_params/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from .firewall_rule_target import FirewallRuleTarget as FirewallRuleTarget diff --git a/src/gradientai/types/shared_params/firewall_rule_target.py b/src/gradientai/types/shared_params/firewall_rule_target.py deleted file mode 100644 index 49a5f75c..00000000 --- a/src/gradientai/types/shared_params/firewall_rule_target.py +++ /dev/null @@ -1,42 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Iterable, Optional -from typing_extensions import TypedDict - -__all__ = ["FirewallRuleTarget"] - - -class FirewallRuleTarget(TypedDict, total=False): - addresses: List[str] - """ - An array of strings containing the IPv4 addresses, IPv6 addresses, IPv4 CIDRs, - and/or IPv6 CIDRs to which the firewall will allow traffic. 
- """ - - droplet_ids: Iterable[int] - """ - An array containing the IDs of the Droplets to which the firewall will allow - traffic. - """ - - kubernetes_ids: List[str] - """ - An array containing the IDs of the Kubernetes clusters to which the firewall - will allow traffic. - """ - - load_balancer_uids: List[str] - """ - An array containing the IDs of the load balancers to which the firewall will - allow traffic. - """ - - tags: Optional[List[str]] - """A flat array of tag names as strings to be applied to the resource. - - Tag names must exist in order to be referenced in a request. - - Requires `tag:create` and `tag:read` scopes. - """ diff --git a/src/gradientai/types/size_list_params.py b/src/gradientai/types/size_list_params.py deleted file mode 100644 index 5df85a9c..00000000 --- a/src/gradientai/types/size_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["SizeListParams"] - - -class SizeListParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" diff --git a/src/gradientai/types/size_list_response.py b/src/gradientai/types/size_list_response.py deleted file mode 100644 index 4ef8078b..00000000 --- a/src/gradientai/types/size_list_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from .._models import BaseModel -from .shared.size import Size -from .shared.page_links import PageLinks -from .shared.meta_properties import MetaProperties - -__all__ = ["SizeListResponse"] - - -class SizeListResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - sizes: List[Size] - - links: Optional[PageLinks] = None diff --git a/src/gradientai/types/snapshot_list_params.py b/src/gradientai/types/snapshot_list_params.py deleted file mode 100644 index 6d1b6f5b..00000000 --- a/src/gradientai/types/snapshot_list_params.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["SnapshotListParams"] - - -class SnapshotListParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" - - resource_type: Literal["droplet", "volume"] - """Used to filter snapshots by a resource type.""" diff --git a/src/gradientai/types/snapshot_list_response.py b/src/gradientai/types/snapshot_list_response.py deleted file mode 100644 index 1b55b099..00000000 --- a/src/gradientai/types/snapshot_list_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from .._models import BaseModel -from .shared.snapshots import Snapshots -from .shared.page_links import PageLinks -from .shared.meta_properties import MetaProperties - -__all__ = ["SnapshotListResponse"] - - -class SnapshotListResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - links: Optional[PageLinks] = None - - snapshots: Optional[List[Snapshots]] = None diff --git a/src/gradientai/types/snapshot_retrieve_response.py b/src/gradientai/types/snapshot_retrieve_response.py deleted file mode 100644 index f9ec12bc..00000000 --- a/src/gradientai/types/snapshot_retrieve_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from .._models import BaseModel -from .shared.snapshots import Snapshots - -__all__ = ["SnapshotRetrieveResponse"] - - -class SnapshotRetrieveResponse(BaseModel): - snapshot: Optional[Snapshots] = None diff --git a/src/gradientai/types/sticky_sessions.py b/src/gradientai/types/sticky_sessions.py deleted file mode 100644 index 5245d712..00000000 --- a/src/gradientai/types/sticky_sessions.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["StickySessions"] - - -class StickySessions(BaseModel): - cookie_name: Optional[str] = None - """The name of the cookie sent to the client. - - This attribute is only returned when using `cookies` for the sticky sessions - type. - """ - - cookie_ttl_seconds: Optional[int] = None - """The number of seconds until the cookie set by the load balancer expires. - - This attribute is only returned when using `cookies` for the sticky sessions - type. 
- """ - - type: Optional[Literal["cookies", "none"]] = None - """ - An attribute indicating how and if requests from a client will be persistently - served by the same backend Droplet. The possible values are `cookies` or `none`. - """ diff --git a/src/gradientai/types/sticky_sessions_param.py b/src/gradientai/types/sticky_sessions_param.py deleted file mode 100644 index acea4a4a..00000000 --- a/src/gradientai/types/sticky_sessions_param.py +++ /dev/null @@ -1,29 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["StickySessionsParam"] - - -class StickySessionsParam(TypedDict, total=False): - cookie_name: str - """The name of the cookie sent to the client. - - This attribute is only returned when using `cookies` for the sticky sessions - type. - """ - - cookie_ttl_seconds: int - """The number of seconds until the cookie set by the load balancer expires. - - This attribute is only returned when using `cookies` for the sticky sessions - type. - """ - - type: Literal["cookies", "none"] - """ - An attribute indicating how and if requests from a client will be persistently - served by the same backend Droplet. The possible values are `cookies` or `none`. - """ diff --git a/src/gradientai/types/volume_create_params.py b/src/gradientai/types/volume_create_params.py deleted file mode 100644 index fc889801..00000000 --- a/src/gradientai/types/volume_create_params.py +++ /dev/null @@ -1,153 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -__all__ = ["VolumeCreateParams", "VolumesExt4", "VolumesXfs"] - - -class VolumesExt4(TypedDict, total=False): - name: Required[str] - """A human-readable name for the block storage volume. 
- - Must be lowercase and be composed only of numbers, letters and "-", up to a - limit of 64 characters. The name must begin with a letter. - """ - - region: Required[ - Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - ] - """ - The slug identifier for the region where the resource will initially be - available. - """ - - size_gigabytes: Required[int] - """The size of the block storage volume in GiB (1024^3). - - This field does not apply when creating a volume from a snapshot. - """ - - description: str - """An optional free-form text field to describe a block storage volume.""" - - filesystem_label: str - """The label applied to the filesystem. - - Labels for ext4 type filesystems may contain 16 characters while labels for xfs - type filesystems are limited to 12 characters. May only be used in conjunction - with filesystem_type. - """ - - filesystem_type: str - """The name of the filesystem type to be used on the volume. - - When provided, the volume will automatically be formatted to the specified - filesystem type. Currently, the available options are `ext4` and `xfs`. - Pre-formatted volumes are automatically mounted when attached to Ubuntu, Debian, - Fedora, Fedora Atomic, and CentOS Droplets created on or after April 26, 2018. - Attaching pre-formatted volumes to other Droplets is not recommended. - """ - - snapshot_id: str - """The unique identifier for the volume snapshot from which to create the volume.""" - - tags: Optional[List[str]] - """A flat array of tag names as strings to be applied to the resource. - - Tag names may be for either existing or new tags. - - Requires `tag:create` scope. - """ - - -class VolumesXfs(TypedDict, total=False): - name: Required[str] - """A human-readable name for the block storage volume. - - Must be lowercase and be composed only of numbers, letters and "-", up to a - limit of 64 characters. 
The name must begin with a letter. - """ - - region: Required[ - Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - ] - """ - The slug identifier for the region where the resource will initially be - available. - """ - - size_gigabytes: Required[int] - """The size of the block storage volume in GiB (1024^3). - - This field does not apply when creating a volume from a snapshot. - """ - - description: str - """An optional free-form text field to describe a block storage volume.""" - - filesystem_label: str - """The label applied to the filesystem. - - Labels for ext4 type filesystems may contain 16 characters while labels for xfs - type filesystems are limited to 12 characters. May only be used in conjunction - with filesystem_type. - """ - - filesystem_type: str - """The name of the filesystem type to be used on the volume. - - When provided, the volume will automatically be formatted to the specified - filesystem type. Currently, the available options are `ext4` and `xfs`. - Pre-formatted volumes are automatically mounted when attached to Ubuntu, Debian, - Fedora, Fedora Atomic, and CentOS Droplets created on or after April 26, 2018. - Attaching pre-formatted volumes to other Droplets is not recommended. - """ - - snapshot_id: str - """The unique identifier for the volume snapshot from which to create the volume.""" - - tags: Optional[List[str]] - """A flat array of tag names as strings to be applied to the resource. - - Tag names may be for either existing or new tags. - - Requires `tag:create` scope. 
- """ - - -VolumeCreateParams: TypeAlias = Union[VolumesExt4, VolumesXfs] diff --git a/src/gradientai/types/volume_create_response.py b/src/gradientai/types/volume_create_response.py deleted file mode 100644 index cc3a560a..00000000 --- a/src/gradientai/types/volume_create_response.py +++ /dev/null @@ -1,65 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from .._models import BaseModel -from .shared.region import Region - -__all__ = ["VolumeCreateResponse", "Volume"] - - -class Volume(BaseModel): - id: Optional[str] = None - """The unique identifier for the block storage volume.""" - - created_at: Optional[str] = None - """ - A time value given in ISO8601 combined date and time format that represents when - the block storage volume was created. - """ - - description: Optional[str] = None - """An optional free-form text field to describe a block storage volume.""" - - droplet_ids: Optional[List[int]] = None - """An array containing the IDs of the Droplets the volume is attached to. - - Note that at this time, a volume can only be attached to a single Droplet. - """ - - filesystem_label: Optional[str] = None - """The label currently applied to the filesystem.""" - - filesystem_type: Optional[str] = None - """The type of filesystem currently in-use on the volume.""" - - name: Optional[str] = None - """A human-readable name for the block storage volume. - - Must be lowercase and be composed only of numbers, letters and "-", up to a - limit of 64 characters. The name must begin with a letter. - """ - - region: Optional[Region] = None - """The region that the block storage volume is located in. - - When setting a region, the value should be the slug identifier for the region. - When you query a block storage volume, the entire region object will be - returned. - """ - - size_gigabytes: Optional[int] = None - """The size of the block storage volume in GiB (1024^3). 
- - This field does not apply when creating a volume from a snapshot. - """ - - tags: Optional[List[str]] = None - """A flat array of tag names as strings applied to the resource. - - Requires `tag:read` scope. - """ - - -class VolumeCreateResponse(BaseModel): - volume: Optional[Volume] = None diff --git a/src/gradientai/types/volume_delete_by_name_params.py b/src/gradientai/types/volume_delete_by_name_params.py deleted file mode 100644 index 26d173f0..00000000 --- a/src/gradientai/types/volume_delete_by_name_params.py +++ /dev/null @@ -1,31 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["VolumeDeleteByNameParams"] - - -class VolumeDeleteByNameParams(TypedDict, total=False): - name: str - """The block storage volume's name.""" - - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - """The slug identifier for the region where the resource is available.""" diff --git a/src/gradientai/types/volume_list_params.py b/src/gradientai/types/volume_list_params.py deleted file mode 100644 index b4549651..00000000 --- a/src/gradientai/types/volume_list_params.py +++ /dev/null @@ -1,37 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing_extensions import Literal, TypedDict - -__all__ = ["VolumeListParams"] - - -class VolumeListParams(TypedDict, total=False): - name: str - """The block storage volume's name.""" - - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" - - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - """The slug identifier for the region where the resource is available.""" diff --git a/src/gradientai/types/volume_list_response.py b/src/gradientai/types/volume_list_response.py deleted file mode 100644 index f8a97439..00000000 --- a/src/gradientai/types/volume_list_response.py +++ /dev/null @@ -1,73 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from .._models import BaseModel -from .shared.region import Region -from .shared.page_links import PageLinks -from .shared.meta_properties import MetaProperties - -__all__ = ["VolumeListResponse", "Volume"] - - -class Volume(BaseModel): - id: Optional[str] = None - """The unique identifier for the block storage volume.""" - - created_at: Optional[str] = None - """ - A time value given in ISO8601 combined date and time format that represents when - the block storage volume was created. - """ - - description: Optional[str] = None - """An optional free-form text field to describe a block storage volume.""" - - droplet_ids: Optional[List[int]] = None - """An array containing the IDs of the Droplets the volume is attached to. - - Note that at this time, a volume can only be attached to a single Droplet. 
- """ - - filesystem_label: Optional[str] = None - """The label currently applied to the filesystem.""" - - filesystem_type: Optional[str] = None - """The type of filesystem currently in-use on the volume.""" - - name: Optional[str] = None - """A human-readable name for the block storage volume. - - Must be lowercase and be composed only of numbers, letters and "-", up to a - limit of 64 characters. The name must begin with a letter. - """ - - region: Optional[Region] = None - """The region that the block storage volume is located in. - - When setting a region, the value should be the slug identifier for the region. - When you query a block storage volume, the entire region object will be - returned. - """ - - size_gigabytes: Optional[int] = None - """The size of the block storage volume in GiB (1024^3). - - This field does not apply when creating a volume from a snapshot. - """ - - tags: Optional[List[str]] = None - """A flat array of tag names as strings applied to the resource. - - Requires `tag:read` scope. - """ - - -class VolumeListResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - volumes: List[Volume] - """Array of volumes.""" - - links: Optional[PageLinks] = None diff --git a/src/gradientai/types/volume_retrieve_response.py b/src/gradientai/types/volume_retrieve_response.py deleted file mode 100644 index c9653f9e..00000000 --- a/src/gradientai/types/volume_retrieve_response.py +++ /dev/null @@ -1,65 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from .._models import BaseModel -from .shared.region import Region - -__all__ = ["VolumeRetrieveResponse", "Volume"] - - -class Volume(BaseModel): - id: Optional[str] = None - """The unique identifier for the block storage volume.""" - - created_at: Optional[str] = None - """ - A time value given in ISO8601 combined date and time format that represents when - the block storage volume was created. - """ - - description: Optional[str] = None - """An optional free-form text field to describe a block storage volume.""" - - droplet_ids: Optional[List[int]] = None - """An array containing the IDs of the Droplets the volume is attached to. - - Note that at this time, a volume can only be attached to a single Droplet. - """ - - filesystem_label: Optional[str] = None - """The label currently applied to the filesystem.""" - - filesystem_type: Optional[str] = None - """The type of filesystem currently in-use on the volume.""" - - name: Optional[str] = None - """A human-readable name for the block storage volume. - - Must be lowercase and be composed only of numbers, letters and "-", up to a - limit of 64 characters. The name must begin with a letter. - """ - - region: Optional[Region] = None - """The region that the block storage volume is located in. - - When setting a region, the value should be the slug identifier for the region. - When you query a block storage volume, the entire region object will be - returned. - """ - - size_gigabytes: Optional[int] = None - """The size of the block storage volume in GiB (1024^3). - - This field does not apply when creating a volume from a snapshot. - """ - - tags: Optional[List[str]] = None - """A flat array of tag names as strings applied to the resource. - - Requires `tag:read` scope. 
- """ - - -class VolumeRetrieveResponse(BaseModel): - volume: Optional[Volume] = None diff --git a/src/gradientai/types/volumes/__init__.py b/src/gradientai/types/volumes/__init__.py deleted file mode 100644 index 68d3d1e9..00000000 --- a/src/gradientai/types/volumes/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from .volume_action import VolumeAction as VolumeAction -from .action_list_params import ActionListParams as ActionListParams -from .action_list_response import ActionListResponse as ActionListResponse -from .snapshot_list_params import SnapshotListParams as SnapshotListParams -from .action_retrieve_params import ActionRetrieveParams as ActionRetrieveParams -from .snapshot_create_params import SnapshotCreateParams as SnapshotCreateParams -from .snapshot_list_response import SnapshotListResponse as SnapshotListResponse -from .action_retrieve_response import ActionRetrieveResponse as ActionRetrieveResponse -from .snapshot_create_response import SnapshotCreateResponse as SnapshotCreateResponse -from .snapshot_retrieve_response import SnapshotRetrieveResponse as SnapshotRetrieveResponse -from .action_initiate_by_id_params import ActionInitiateByIDParams as ActionInitiateByIDParams -from .action_initiate_by_id_response import ActionInitiateByIDResponse as ActionInitiateByIDResponse -from .action_initiate_by_name_params import ActionInitiateByNameParams as ActionInitiateByNameParams -from .action_initiate_by_name_response import ActionInitiateByNameResponse as ActionInitiateByNameResponse diff --git a/src/gradientai/types/volumes/action_initiate_by_id_params.py b/src/gradientai/types/volumes/action_initiate_by_id_params.py deleted file mode 100644 index 6d41d463..00000000 --- a/src/gradientai/types/volumes/action_initiate_by_id_params.py +++ /dev/null @@ -1,133 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -__all__ = ["ActionInitiateByIDParams", "VolumeActionPostAttach", "VolumeActionPostDetach", "VolumeActionPostResize"] - - -class VolumeActionPostAttach(TypedDict, total=False): - droplet_id: Required[int] - """ - The unique identifier for the Droplet the volume will be attached or detached - from. - """ - - type: Required[Literal["attach", "detach", "resize"]] - """The volume action to initiate.""" - - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" - - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - """ - The slug identifier for the region where the resource will initially be - available. - """ - - tags: Optional[List[str]] - """A flat array of tag names as strings to be applied to the resource. - - Tag names may be for either existing or new tags. - - Requires `tag:create` scope. - """ - - -class VolumeActionPostDetach(TypedDict, total=False): - droplet_id: Required[int] - """ - The unique identifier for the Droplet the volume will be attached or detached - from. - """ - - type: Required[Literal["attach", "detach", "resize"]] - """The volume action to initiate.""" - - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" - - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - """ - The slug identifier for the region where the resource will initially be - available. 
- """ - - -class VolumeActionPostResize(TypedDict, total=False): - size_gigabytes: Required[int] - """The new size of the block storage volume in GiB (1024^3).""" - - type: Required[Literal["attach", "detach", "resize"]] - """The volume action to initiate.""" - - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" - - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - """ - The slug identifier for the region where the resource will initially be - available. - """ - - -ActionInitiateByIDParams: TypeAlias = Union[VolumeActionPostAttach, VolumeActionPostDetach, VolumeActionPostResize] diff --git a/src/gradientai/types/volumes/action_initiate_by_id_response.py b/src/gradientai/types/volumes/action_initiate_by_id_response.py deleted file mode 100644 index 23484c97..00000000 --- a/src/gradientai/types/volumes/action_initiate_by_id_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from .volume_action import VolumeAction - -__all__ = ["ActionInitiateByIDResponse"] - - -class ActionInitiateByIDResponse(BaseModel): - action: Optional[VolumeAction] = None diff --git a/src/gradientai/types/volumes/action_initiate_by_name_params.py b/src/gradientai/types/volumes/action_initiate_by_name_params.py deleted file mode 100644 index d1a7d084..00000000 --- a/src/gradientai/types/volumes/action_initiate_by_name_params.py +++ /dev/null @@ -1,97 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import List, Union, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -__all__ = ["ActionInitiateByNameParams", "VolumeActionPostAttach", "VolumeActionPostDetach"] - - -class VolumeActionPostAttach(TypedDict, total=False): - droplet_id: Required[int] - """ - The unique identifier for the Droplet the volume will be attached or detached - from. - """ - - type: Required[Literal["attach", "detach", "resize"]] - """The volume action to initiate.""" - - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" - - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - """ - The slug identifier for the region where the resource will initially be - available. - """ - - tags: Optional[List[str]] - """A flat array of tag names as strings to be applied to the resource. - - Tag names may be for either existing or new tags. - - Requires `tag:create` scope. - """ - - -class VolumeActionPostDetach(TypedDict, total=False): - droplet_id: Required[int] - """ - The unique identifier for the Droplet the volume will be attached or detached - from. - """ - - type: Required[Literal["attach", "detach", "resize"]] - """The volume action to initiate.""" - - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" - - region: Literal[ - "ams1", - "ams2", - "ams3", - "blr1", - "fra1", - "lon1", - "nyc1", - "nyc2", - "nyc3", - "sfo1", - "sfo2", - "sfo3", - "sgp1", - "tor1", - "syd1", - ] - """ - The slug identifier for the region where the resource will initially be - available. 
- """ - - -ActionInitiateByNameParams: TypeAlias = Union[VolumeActionPostAttach, VolumeActionPostDetach] diff --git a/src/gradientai/types/volumes/action_initiate_by_name_response.py b/src/gradientai/types/volumes/action_initiate_by_name_response.py deleted file mode 100644 index 0b84be25..00000000 --- a/src/gradientai/types/volumes/action_initiate_by_name_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from .volume_action import VolumeAction - -__all__ = ["ActionInitiateByNameResponse"] - - -class ActionInitiateByNameResponse(BaseModel): - action: Optional[VolumeAction] = None diff --git a/src/gradientai/types/volumes/action_list_params.py b/src/gradientai/types/volumes/action_list_params.py deleted file mode 100644 index dd873288..00000000 --- a/src/gradientai/types/volumes/action_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["ActionListParams"] - - -class ActionListParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" diff --git a/src/gradientai/types/volumes/action_list_response.py b/src/gradientai/types/volumes/action_list_response.py deleted file mode 100644 index ddb17e23..00000000 --- a/src/gradientai/types/volumes/action_list_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import List, Optional - -from ..._models import BaseModel -from .volume_action import VolumeAction -from ..shared.page_links import PageLinks -from ..shared.meta_properties import MetaProperties - -__all__ = ["ActionListResponse"] - - -class ActionListResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - actions: Optional[List[VolumeAction]] = None - - links: Optional[PageLinks] = None diff --git a/src/gradientai/types/volumes/action_retrieve_params.py b/src/gradientai/types/volumes/action_retrieve_params.py deleted file mode 100644 index 93ab443f..00000000 --- a/src/gradientai/types/volumes/action_retrieve_params.py +++ /dev/null @@ -1,17 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Required, TypedDict - -__all__ = ["ActionRetrieveParams"] - - -class ActionRetrieveParams(TypedDict, total=False): - volume_id: Required[str] - - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" diff --git a/src/gradientai/types/volumes/action_retrieve_response.py b/src/gradientai/types/volumes/action_retrieve_response.py deleted file mode 100644 index 9f43cabe..00000000 --- a/src/gradientai/types/volumes/action_retrieve_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..._models import BaseModel -from .volume_action import VolumeAction - -__all__ = ["ActionRetrieveResponse"] - - -class ActionRetrieveResponse(BaseModel): - action: Optional[VolumeAction] = None diff --git a/src/gradientai/types/volumes/snapshot_create_params.py b/src/gradientai/types/volumes/snapshot_create_params.py deleted file mode 100644 index 8cce4a59..00000000 --- a/src/gradientai/types/volumes/snapshot_create_params.py +++ /dev/null @@ -1,21 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Optional -from typing_extensions import Required, TypedDict - -__all__ = ["SnapshotCreateParams"] - - -class SnapshotCreateParams(TypedDict, total=False): - name: Required[str] - """A human-readable name for the volume snapshot.""" - - tags: Optional[List[str]] - """A flat array of tag names as strings to be applied to the resource. - - Tag names may be for either existing or new tags. - - Requires `tag:create` scope. - """ diff --git a/src/gradientai/types/volumes/snapshot_create_response.py b/src/gradientai/types/volumes/snapshot_create_response.py deleted file mode 100644 index 4c7049d1..00000000 --- a/src/gradientai/types/volumes/snapshot_create_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..._models import BaseModel -from ..shared.snapshots import Snapshots - -__all__ = ["SnapshotCreateResponse"] - - -class SnapshotCreateResponse(BaseModel): - snapshot: Optional[Snapshots] = None diff --git a/src/gradientai/types/volumes/snapshot_list_params.py b/src/gradientai/types/volumes/snapshot_list_params.py deleted file mode 100644 index 65221a79..00000000 --- a/src/gradientai/types/volumes/snapshot_list_params.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import TypedDict - -__all__ = ["SnapshotListParams"] - - -class SnapshotListParams(TypedDict, total=False): - page: int - """Which 'page' of paginated results to return.""" - - per_page: int - """Number of items returned per page""" diff --git a/src/gradientai/types/volumes/snapshot_list_response.py b/src/gradientai/types/volumes/snapshot_list_response.py deleted file mode 100644 index 29b6ec3b..00000000 --- a/src/gradientai/types/volumes/snapshot_list_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Optional - -from ..._models import BaseModel -from ..shared.snapshots import Snapshots -from ..shared.page_links import PageLinks -from ..shared.meta_properties import MetaProperties - -__all__ = ["SnapshotListResponse"] - - -class SnapshotListResponse(BaseModel): - meta: MetaProperties - """Information about the response itself.""" - - links: Optional[PageLinks] = None - - snapshots: Optional[List[Snapshots]] = None diff --git a/src/gradientai/types/volumes/snapshot_retrieve_response.py b/src/gradientai/types/volumes/snapshot_retrieve_response.py deleted file mode 100644 index 38d84c7a..00000000 --- a/src/gradientai/types/volumes/snapshot_retrieve_response.py +++ /dev/null @@ -1,12 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Optional - -from ..._models import BaseModel -from ..shared.snapshots import Snapshots - -__all__ = ["SnapshotRetrieveResponse"] - - -class SnapshotRetrieveResponse(BaseModel): - snapshot: Optional[Snapshots] = None diff --git a/src/gradientai/types/volumes/volume_action.py b/src/gradientai/types/volumes/volume_action.py deleted file mode 100644 index 4d9adf3b..00000000 --- a/src/gradientai/types/volumes/volume_action.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional - -from ..shared.action import Action - -__all__ = ["VolumeAction"] - - -class VolumeAction(Action): - resource_id: Optional[int] = None # type: ignore - - type: Optional[str] = None # type: ignore - """This is the type of action that the object represents. - - For example, this could be "attach_volume" to represent the state of a volume - attach action. - """ diff --git a/tests/api_resources/account/__init__.py b/tests/api_resources/account/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/account/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/account/test_keys.py b/tests/api_resources/account/test_keys.py deleted file mode 100644 index d123c774..00000000 --- a/tests/api_resources/account/test_keys.py +++ /dev/null @@ -1,399 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types.account import ( - KeyListResponse, - KeyCreateResponse, - KeyUpdateResponse, - KeyRetrieveResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestKeys: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: GradientAI) -> None: - key = client.account.keys.create( - name="My SSH Public Key", - public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", - ) - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: - response = client.account.keys.with_raw_response.create( - name="My SSH Public Key", - public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: - with client.account.keys.with_streaming_response.create( - name="My SSH Public Key", - public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyCreateResponse, key, 
path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - key = client.account.keys.retrieve( - 512189, - ) - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.account.keys.with_raw_response.retrieve( - 512189, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.account.keys.with_streaming_response.retrieve( - 512189, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: GradientAI) -> None: - key = client.account.keys.update( - ssh_key_identifier=512189, - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: - key = client.account.keys.update( - ssh_key_identifier=512189, - name="My SSH Public Key", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: - response = client.account.keys.with_raw_response.update( - ssh_key_identifier=512189, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - 
assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: - with client.account.keys.with_streaming_response.update( - ssh_key_identifier=512189, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - key = client.account.keys.list() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - key = client.account.keys.list( - page=1, - per_page=1, - ) - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.account.keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.account.keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: GradientAI) -> None: - key = client.account.keys.delete( - 512189, - ) - assert key is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, 
client: GradientAI) -> None: - response = client.account.keys.with_raw_response.delete( - 512189, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = response.parse() - assert key is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.account.keys.with_streaming_response.delete( - 512189, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = response.parse() - assert key is None - - assert cast(Any, response.is_closed) is True - - -class TestAsyncKeys: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: - key = await async_client.account.keys.create( - name="My SSH Public Key", - public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", - ) - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.account.keys.with_raw_response.create( - name="My SSH Public Key", - public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with 
async_client.account.keys.with_streaming_response.create( - name="My SSH Public Key", - public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyCreateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - key = await async_client.account.keys.retrieve( - 512189, - ) - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.account.keys.with_raw_response.retrieve( - 512189, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.account.keys.with_streaming_response.retrieve( - 512189, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyRetrieveResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: - key = await async_client.account.keys.update( - ssh_key_identifier=512189, - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def 
test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.account.keys.update( - ssh_key_identifier=512189, - name="My SSH Public Key", - ) - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.account.keys.with_raw_response.update( - ssh_key_identifier=512189, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.account.keys.with_streaming_response.update( - ssh_key_identifier=512189, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyUpdateResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - key = await async_client.account.keys.list() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - key = await async_client.account.keys.list( - page=1, - per_page=1, - ) - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.account.keys.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await 
response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.account.keys.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert_matches_type(KeyListResponse, key, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - key = await async_client.account.keys.delete( - 512189, - ) - assert key is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.account.keys.with_raw_response.delete( - 512189, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - key = await response.parse() - assert key is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.account.keys.with_streaming_response.delete( - 512189, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - key = await response.parse() - assert key is None - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/agents/chat/test_completions.py b/tests/api_resources/agents/chat/test_completions.py index 6533a423..48707a55 100644 --- a/tests/api_resources/agents/chat/test_completions.py +++ b/tests/api_resources/agents/chat/test_completions.py @@ -56,17 +56,6 @@ def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> N stream=False, stream_options={"include_usage": True}, temperature=1, - 
tool_choice="none", - tools=[ - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - }, - "type": "function", - } - ], top_logprobs=0, top_p=1, user="user-1234", @@ -149,17 +138,6 @@ def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> N stop="\n", stream_options={"include_usage": True}, temperature=1, - tool_choice="none", - tools=[ - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - }, - "type": "function", - } - ], top_logprobs=0, top_p=1, user="user-1234", @@ -255,17 +233,6 @@ async def test_method_create_with_all_params_overload_1( stream=False, stream_options={"include_usage": True}, temperature=1, - tool_choice="none", - tools=[ - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - }, - "type": "function", - } - ], top_logprobs=0, top_p=1, user="user-1234", @@ -356,17 +323,6 @@ async def test_method_create_with_all_params_overload_2( stop="\n", stream_options={"include_usage": True}, temperature=1, - tool_choice="none", - tools=[ - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - }, - "type": "function", - } - ], top_logprobs=0, top_p=1, user="user-1234", diff --git a/tests/api_resources/agents/evaluation_metrics/test_models.py b/tests/api_resources/agents/evaluation_metrics/test_models.py deleted file mode 100644 index 6b8f8bc7..00000000 --- a/tests/api_resources/agents/evaluation_metrics/test_models.py +++ /dev/null @@ -1,102 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types.agents.evaluation_metrics import ModelListResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestModels: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - model = client.agents.evaluation_metrics.models.list() - assert_matches_type(ModelListResponse, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - model = client.agents.evaluation_metrics.models.list( - page=0, - per_page=0, - public_only=True, - usecases=["MODEL_USECASE_UNKNOWN"], - ) - assert_matches_type(ModelListResponse, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.agents.evaluation_metrics.models.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = response.parse() - assert_matches_type(ModelListResponse, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.agents.evaluation_metrics.models.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - model = response.parse() - assert_matches_type(ModelListResponse, model, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncModels: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], 
indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - model = await async_client.agents.evaluation_metrics.models.list() - assert_matches_type(ModelListResponse, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - model = await async_client.agents.evaluation_metrics.models.list( - page=0, - per_page=0, - public_only=True, - usecases=["MODEL_USECASE_UNKNOWN"], - ) - assert_matches_type(ModelListResponse, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.evaluation_metrics.models.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = await response.parse() - assert_matches_type(ModelListResponse, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.evaluation_metrics.models.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - model = await response.parse() - assert_matches_type(ModelListResponse, model, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py index ea39c474..afeaa8f1 100644 --- a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py +++ b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py @@ -34,9 +34,9 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def 
test_method_create_with_all_params(self, client: GradientAI) -> None: workspace = client.agents.evaluation_metrics.workspaces.create( - agent_uuids=["example string"], - description='"example string"', - name='"example name"', + agent_uuids=["string"], + description="description", + name="name", ) assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) @@ -108,7 +108,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: workspace = client.agents.evaluation_metrics.workspaces.update( - path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_workspace_uuid="workspace_uuid", ) assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) @@ -116,10 +116,10 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: workspace = client.agents.evaluation_metrics.workspaces.update( - path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', - description='"example string"', - name='"example name"', - body_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_workspace_uuid="workspace_uuid", + description="description", + name="name", + body_workspace_uuid="workspace_uuid", ) assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) @@ -127,7 +127,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.evaluation_metrics.workspaces.with_raw_response.update( - path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_workspace_uuid="workspace_uuid", ) assert response.is_closed is True @@ -139,7 +139,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with 
client.agents.evaluation_metrics.workspaces.with_streaming_response.update( - path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_workspace_uuid="workspace_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -285,9 +285,9 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.create( - agent_uuids=["example string"], - description='"example string"', - name='"example name"', + agent_uuids=["string"], + description="description", + name="name", ) assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) @@ -359,7 +359,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.update( - path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_workspace_uuid="workspace_uuid", ) assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) @@ -367,10 +367,10 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.update( - path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', - description='"example string"', - name='"example name"', - body_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_workspace_uuid="workspace_uuid", + description="description", + name="name", + body_workspace_uuid="workspace_uuid", ) assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) @@ -378,7 +378,7 @@ async def test_method_update_with_all_params(self, 
async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.update( - path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_workspace_uuid="workspace_uuid", ) assert response.is_closed is True @@ -390,7 +390,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.update( - path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_workspace_uuid="workspace_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py index 635721b3..764e13e0 100644 --- a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py +++ b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py @@ -24,7 +24,7 @@ class TestAgents: @parametrize def test_method_list(self, client: GradientAI) -> None: agent = client.agents.evaluation_metrics.workspaces.agents.list( - workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + workspace_uuid="workspace_uuid", ) assert_matches_type(AgentListResponse, agent, path=["response"]) @@ -32,7 +32,8 @@ def test_method_list(self, client: GradientAI) -> None: @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: agent = client.agents.evaluation_metrics.workspaces.agents.list( - workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + workspace_uuid="workspace_uuid", + field_mask={"paths": ["string"]}, only_deployed=True, page=0, per_page=0, @@ -43,7 +44,7 @@ def 
test_method_list_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_list(self, client: GradientAI) -> None: response = client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list( - workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + workspace_uuid="workspace_uuid", ) assert response.is_closed is True @@ -55,7 +56,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: with client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.list( - workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + workspace_uuid="workspace_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -77,7 +78,7 @@ def test_path_params_list(self, client: GradientAI) -> None: @parametrize def test_method_move(self, client: GradientAI) -> None: agent = client.agents.evaluation_metrics.workspaces.agents.move( - path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_workspace_uuid="workspace_uuid", ) assert_matches_type(AgentMoveResponse, agent, path=["response"]) @@ -85,9 +86,9 @@ def test_method_move(self, client: GradientAI) -> None: @parametrize def test_method_move_with_all_params(self, client: GradientAI) -> None: agent = client.agents.evaluation_metrics.workspaces.agents.move( - path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuids=["example string"], - body_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_workspace_uuid="workspace_uuid", + agent_uuids=["string"], + body_workspace_uuid="workspace_uuid", ) assert_matches_type(AgentMoveResponse, agent, path=["response"]) @@ -95,7 +96,7 @@ def test_method_move_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_move(self, client: GradientAI) -> None: response = 
client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move( - path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_workspace_uuid="workspace_uuid", ) assert response.is_closed is True @@ -107,7 +108,7 @@ def test_raw_response_move(self, client: GradientAI) -> None: @parametrize def test_streaming_response_move(self, client: GradientAI) -> None: with client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.move( - path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_workspace_uuid="workspace_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -135,7 +136,7 @@ class TestAsyncAgents: @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.evaluation_metrics.workspaces.agents.list( - workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + workspace_uuid="workspace_uuid", ) assert_matches_type(AgentListResponse, agent, path=["response"]) @@ -143,7 +144,8 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.evaluation_metrics.workspaces.agents.list( - workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + workspace_uuid="workspace_uuid", + field_mask={"paths": ["string"]}, only_deployed=True, page=0, per_page=0, @@ -154,7 +156,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list( - workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + workspace_uuid="workspace_uuid", ) assert response.is_closed is True @@ -166,7 +168,7 @@ async def test_raw_response_list(self, async_client: 
AsyncGradientAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.list( - workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + workspace_uuid="workspace_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -188,7 +190,7 @@ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_move(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.evaluation_metrics.workspaces.agents.move( - path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_workspace_uuid="workspace_uuid", ) assert_matches_type(AgentMoveResponse, agent, path=["response"]) @@ -196,9 +198,9 @@ async def test_method_move(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_move_with_all_params(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.evaluation_metrics.workspaces.agents.move( - path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuids=["example string"], - body_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_workspace_uuid="workspace_uuid", + agent_uuids=["string"], + body_workspace_uuid="workspace_uuid", ) assert_matches_type(AgentMoveResponse, agent, path=["response"]) @@ -206,7 +208,7 @@ async def test_method_move_with_all_params(self, async_client: AsyncGradientAI) @parametrize async def test_raw_response_move(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move( - path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_workspace_uuid="workspace_uuid", ) assert response.is_closed is True @@ -218,7 +220,7 @@ async def test_raw_response_move(self, async_client: AsyncGradientAI) -> 
None: @parametrize async def test_streaming_response_move(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.move( - path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_workspace_uuid="workspace_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py index c29511f5..beb9666a 100644 --- a/tests/api_resources/agents/test_api_keys.py +++ b/tests/api_resources/agents/test_api_keys.py @@ -27,7 +27,7 @@ class TestAPIKeys: @parametrize def test_method_create(self, client: GradientAI) -> None: api_key = client.agents.api_keys.create( - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid="agent_uuid", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -35,9 +35,9 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: api_key = client.agents.api_keys.create( - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_agent_uuid='"12345678-1234-1234-1234-123456789012"', - name="Production Key", + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + name="name", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -45,7 +45,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_create(self, client: GradientAI) -> None: response = client.agents.api_keys.with_raw_response.create( - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -57,7 +57,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: with 
client.agents.api_keys.with_streaming_response.create( - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -79,8 +79,8 @@ def test_path_params_create(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: api_key = client.agents.api_keys.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -88,11 +88,11 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: api_key = client.agents.api_keys.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_agent_uuid='"12345678-1234-1234-1234-123456789012"', - body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', - name='"Production Key"', + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + body_api_key_uuid="api_key_uuid", + name="name", ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -100,8 +100,8 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.api_keys.with_raw_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -113,8 +113,8 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def 
test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.api_keys.with_streaming_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -129,21 +129,21 @@ def test_streaming_response_update(self, client: GradientAI) -> None: def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): client.agents.api_keys.with_raw_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): client.agents.api_keys.with_raw_response.update( path_api_key_uuid="", - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid="agent_uuid", ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: api_key = client.agents.api_keys.list( - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @@ -151,7 +151,7 @@ def test_method_list(self, client: GradientAI) -> None: @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: api_key = client.agents.api_keys.list( - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", page=0, per_page=0, ) @@ -161,7 +161,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_list(self, client: GradientAI) -> None: response = client.agents.api_keys.with_raw_response.list( - 
agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -173,7 +173,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: with client.agents.api_keys.with_streaming_response.list( - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -195,8 +195,8 @@ def test_path_params_list(self, client: GradientAI) -> None: @parametrize def test_method_delete(self, client: GradientAI) -> None: api_key = client.agents.api_keys.delete( - api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", ) assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) @@ -204,8 +204,8 @@ def test_method_delete(self, client: GradientAI) -> None: @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: response = client.agents.api_keys.with_raw_response.delete( - api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -217,8 +217,8 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: with client.agents.api_keys.with_streaming_response.delete( - api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -233,22 +233,22 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: 
def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): client.agents.api_keys.with_raw_response.delete( - api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): client.agents.api_keys.with_raw_response.delete( api_key_uuid="", - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", ) @pytest.mark.skip() @parametrize def test_method_regenerate(self, client: GradientAI) -> None: api_key = client.agents.api_keys.regenerate( - api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", ) assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) @@ -256,8 +256,8 @@ def test_method_regenerate(self, client: GradientAI) -> None: @parametrize def test_raw_response_regenerate(self, client: GradientAI) -> None: response = client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -269,8 +269,8 @@ def test_raw_response_regenerate(self, client: GradientAI) -> None: @parametrize def test_streaming_response_regenerate(self, client: GradientAI) -> None: with client.agents.api_keys.with_streaming_response.regenerate( - api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -285,14 +285,14 @@ def test_streaming_response_regenerate(self, client: 
GradientAI) -> None: def test_path_params_regenerate(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="", - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", ) @@ -305,7 +305,7 @@ class TestAsyncAPIKeys: @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.create( - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid="agent_uuid", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -313,9 +313,9 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.create( - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_agent_uuid='"12345678-1234-1234-1234-123456789012"', - name="Production Key", + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + name="name", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -323,7 +323,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.api_keys.with_raw_response.create( - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -335,7 +335,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: 
@parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.api_keys.with_streaming_response.create( - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -357,8 +357,8 @@ async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -366,11 +366,11 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_agent_uuid='"12345678-1234-1234-1234-123456789012"', - body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', - name='"Production Key"', + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + body_api_key_uuid="api_key_uuid", + name="name", ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -378,8 +378,8 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.api_keys.with_raw_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - 
path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -391,8 +391,8 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.api_keys.with_streaming_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", + path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -407,21 +407,21 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.update( path_api_key_uuid="", - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid="agent_uuid", ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.list( - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @@ -429,7 +429,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> 
None: api_key = await async_client.agents.api_keys.list( - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", page=0, per_page=0, ) @@ -439,7 +439,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.api_keys.with_raw_response.list( - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -451,7 +451,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.api_keys.with_streaming_response.list( - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -473,8 +473,8 @@ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.delete( - api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", ) assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) @@ -482,8 +482,8 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.api_keys.with_raw_response.delete( - api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -495,8 +495,8 @@ async def 
test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.api_keys.with_streaming_response.delete( - api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -511,22 +511,22 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.delete( - api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.delete( api_key_uuid="", - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", ) @pytest.mark.skip() @parametrize async def test_method_regenerate(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.regenerate( - api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", ) assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) @@ -534,8 +534,8 @@ async def test_method_regenerate(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - 
agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -547,8 +547,8 @@ async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) -> N @parametrize async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.api_keys.with_streaming_response.regenerate( - api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key_uuid="api_key_uuid", + agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -563,12 +563,12 @@ async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI async def test_path_params_regenerate(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key_uuid="api_key_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="", - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", ) diff --git a/tests/api_resources/agents/test_evaluation_datasets.py b/tests/api_resources/agents/test_evaluation_datasets.py index 0413591e..e6ca2644 100644 --- a/tests/api_resources/agents/test_evaluation_datasets.py +++ b/tests/api_resources/agents/test_evaluation_datasets.py @@ -31,11 +31,11 @@ def test_method_create(self, client: GradientAI) -> None: def test_method_create_with_all_params(self, client: GradientAI) -> None: evaluation_dataset = client.agents.evaluation_datasets.create( file_upload_dataset={ - "original_file_name": '"example 
name"', - "size_in_bytes": '"12345"', - "stored_object_key": '"example string"', + "original_file_name": "original_file_name", + "size_in_bytes": "size_in_bytes", + "stored_object_key": "stored_object_key", }, - name='"example name"', + name="name", ) assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) @@ -75,7 +75,7 @@ def test_method_create_file_upload_presigned_urls_with_all_params(self, client: evaluation_dataset = client.agents.evaluation_datasets.create_file_upload_presigned_urls( files=[ { - "file_name": '"example name"', + "file_name": "file_name", "file_size": "file_size", } ], @@ -127,11 +127,11 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: evaluation_dataset = await async_client.agents.evaluation_datasets.create( file_upload_dataset={ - "original_file_name": '"example name"', - "size_in_bytes": '"12345"', - "stored_object_key": '"example string"', + "original_file_name": "original_file_name", + "size_in_bytes": "size_in_bytes", + "stored_object_key": "stored_object_key", }, - name='"example name"', + name="name", ) assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) @@ -173,7 +173,7 @@ async def test_method_create_file_upload_presigned_urls_with_all_params( evaluation_dataset = await async_client.agents.evaluation_datasets.create_file_upload_presigned_urls( files=[ { - "file_name": '"example name"', + "file_name": "file_name", "file_size": "file_size", } ], diff --git a/tests/api_resources/agents/test_evaluation_metrics.py b/tests/api_resources/agents/test_evaluation_metrics.py index d64367ae..be83e330 100644 --- a/tests/api_resources/agents/test_evaluation_metrics.py +++ b/tests/api_resources/agents/test_evaluation_metrics.py @@ -9,10 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from 
gradientai.types.agents import ( - EvaluationMetricListResponse, - EvaluationMetricListRegionsResponse, -) +from gradientai.types.agents import EvaluationMetricListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -48,43 +45,6 @@ def test_streaming_response_list(self, client: GradientAI) -> None: assert cast(Any, response.is_closed) is True - @pytest.mark.skip() - @parametrize - def test_method_list_regions(self, client: GradientAI) -> None: - evaluation_metric = client.agents.evaluation_metrics.list_regions() - assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_regions_with_all_params(self, client: GradientAI) -> None: - evaluation_metric = client.agents.evaluation_metrics.list_regions( - serves_batch=True, - serves_inference=True, - ) - assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list_regions(self, client: GradientAI) -> None: - response = client.agents.evaluation_metrics.with_raw_response.list_regions() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluation_metric = response.parse() - assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list_regions(self, client: GradientAI) -> None: - with client.agents.evaluation_metrics.with_streaming_response.list_regions() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluation_metric = response.parse() - assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) - - assert cast(Any, response.is_closed) is True - class TestAsyncEvaluationMetrics: parametrize = 
pytest.mark.parametrize( @@ -118,40 +78,3 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"]) assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_list_regions(self, async_client: AsyncGradientAI) -> None: - evaluation_metric = await async_client.agents.evaluation_metrics.list_regions() - assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_regions_with_all_params(self, async_client: AsyncGradientAI) -> None: - evaluation_metric = await async_client.agents.evaluation_metrics.list_regions( - serves_batch=True, - serves_inference=True, - ) - assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list_regions(self, async_client: AsyncGradientAI) -> None: - response = await async_client.agents.evaluation_metrics.with_raw_response.list_regions() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - evaluation_metric = await response.parse() - assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list_regions(self, async_client: AsyncGradientAI) -> None: - async with async_client.agents.evaluation_metrics.with_streaming_response.list_regions() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - evaluation_metric = await response.parse() - assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git 
a/tests/api_resources/agents/test_evaluation_runs.py b/tests/api_resources/agents/test_evaluation_runs.py index 2ea44e6b..b2fce320 100644 --- a/tests/api_resources/agents/test_evaluation_runs.py +++ b/tests/api_resources/agents/test_evaluation_runs.py @@ -32,9 +32,9 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: evaluation_run = client.agents.evaluation_runs.create( - agent_uuids=["example string"], - run_name="Evaluation Run Name", - test_case_uuid='"12345678-1234-1234-1234-123456789012"', + agent_uuids=["string"], + run_name="run_name", + test_case_uuid="test_case_uuid", ) assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) @@ -106,17 +106,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_list_results(self, client: GradientAI) -> None: evaluation_run = client.agents.evaluation_runs.list_results( - evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', - ) - assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_results_with_all_params(self, client: GradientAI) -> None: - evaluation_run = client.agents.evaluation_runs.list_results( - evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', - page=0, - per_page=0, + "evaluation_run_uuid", ) assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"]) @@ -124,7 +114,7 @@ def test_method_list_results_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_list_results(self, client: GradientAI) -> None: response = client.agents.evaluation_runs.with_raw_response.list_results( - evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', + "evaluation_run_uuid", ) assert response.is_closed is True @@ -136,7 +126,7 @@ def test_raw_response_list_results(self, client: GradientAI) 
-> None: @parametrize def test_streaming_response_list_results(self, client: GradientAI) -> None: with client.agents.evaluation_runs.with_streaming_response.list_results( - evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', + "evaluation_run_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -151,15 +141,15 @@ def test_streaming_response_list_results(self, client: GradientAI) -> None: def test_path_params_list_results(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): client.agents.evaluation_runs.with_raw_response.list_results( - evaluation_run_uuid="", + "", ) @pytest.mark.skip() @parametrize def test_method_retrieve_results(self, client: GradientAI) -> None: evaluation_run = client.agents.evaluation_runs.retrieve_results( - prompt_id=1, - evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", ) assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"]) @@ -167,8 +157,8 @@ def test_method_retrieve_results(self, client: GradientAI) -> None: @parametrize def test_raw_response_retrieve_results(self, client: GradientAI) -> None: response = client.agents.evaluation_runs.with_raw_response.retrieve_results( - prompt_id=1, - evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", ) assert response.is_closed is True @@ -180,8 +170,8 @@ def test_raw_response_retrieve_results(self, client: GradientAI) -> None: @parametrize def test_streaming_response_retrieve_results(self, client: GradientAI) -> None: with client.agents.evaluation_runs.with_streaming_response.retrieve_results( - prompt_id=1, - evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", ) as response: 
assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -196,7 +186,7 @@ def test_streaming_response_retrieve_results(self, client: GradientAI) -> None: def test_path_params_retrieve_results(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): client.agents.evaluation_runs.with_raw_response.retrieve_results( - prompt_id=1, + prompt_id=0, evaluation_run_uuid="", ) @@ -216,9 +206,9 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: evaluation_run = await async_client.agents.evaluation_runs.create( - agent_uuids=["example string"], - run_name="Evaluation Run Name", - test_case_uuid='"12345678-1234-1234-1234-123456789012"', + agent_uuids=["string"], + run_name="run_name", + test_case_uuid="test_case_uuid", ) assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) @@ -290,17 +280,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_list_results(self, async_client: AsyncGradientAI) -> None: evaluation_run = await async_client.agents.evaluation_runs.list_results( - evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', - ) - assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_results_with_all_params(self, async_client: AsyncGradientAI) -> None: - evaluation_run = await async_client.agents.evaluation_runs.list_results( - evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', - page=0, - per_page=0, + "evaluation_run_uuid", ) assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"]) @@ -308,7 +288,7 @@ async def test_method_list_results_with_all_params(self, 
async_client: AsyncGrad @parametrize async def test_raw_response_list_results(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_runs.with_raw_response.list_results( - evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', + "evaluation_run_uuid", ) assert response.is_closed is True @@ -320,7 +300,7 @@ async def test_raw_response_list_results(self, async_client: AsyncGradientAI) -> @parametrize async def test_streaming_response_list_results(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_runs.with_streaming_response.list_results( - evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', + "evaluation_run_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -335,15 +315,15 @@ async def test_streaming_response_list_results(self, async_client: AsyncGradient async def test_path_params_list_results(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): await async_client.agents.evaluation_runs.with_raw_response.list_results( - evaluation_run_uuid="", + "", ) @pytest.mark.skip() @parametrize async def test_method_retrieve_results(self, async_client: AsyncGradientAI) -> None: evaluation_run = await async_client.agents.evaluation_runs.retrieve_results( - prompt_id=1, - evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", ) assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"]) @@ -351,8 +331,8 @@ async def test_method_retrieve_results(self, async_client: AsyncGradientAI) -> N @parametrize async def test_raw_response_retrieve_results(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_runs.with_raw_response.retrieve_results( - prompt_id=1, - 
evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", ) assert response.is_closed is True @@ -364,8 +344,8 @@ async def test_raw_response_retrieve_results(self, async_client: AsyncGradientAI @parametrize async def test_streaming_response_retrieve_results(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_runs.with_streaming_response.retrieve_results( - prompt_id=1, - evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', + prompt_id=0, + evaluation_run_uuid="evaluation_run_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -380,6 +360,6 @@ async def test_streaming_response_retrieve_results(self, async_client: AsyncGrad async def test_path_params_retrieve_results(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): await async_client.agents.evaluation_runs.with_raw_response.retrieve_results( - prompt_id=1, + prompt_id=0, evaluation_run_uuid="", ) diff --git a/tests/api_resources/agents/test_evaluation_test_cases.py b/tests/api_resources/agents/test_evaluation_test_cases.py index e9083ba3..a0b5ee77 100644 --- a/tests/api_resources/agents/test_evaluation_test_cases.py +++ b/tests/api_resources/agents/test_evaluation_test_cases.py @@ -33,17 +33,16 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: evaluation_test_case = client.agents.evaluation_test_cases.create( - dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"', - description='"example string"', - metrics=["example string"], - name='"example name"', + dataset_uuid="dataset_uuid", + description="description", + metrics=["string"], + name="name", star_metric={ - "metric_uuid": '"123e4567-e89b-12d3-a456-426614174000"', - "name": 
'"example name"', - "success_threshold": 123, - "success_threshold_pct": 123, + "metric_uuid": "metric_uuid", + "name": "name", + "success_threshold_pct": 0, }, - workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + workspace_uuid="workspace_uuid", ) assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) @@ -73,7 +72,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @parametrize def test_method_retrieve(self, client: GradientAI) -> None: evaluation_test_case = client.agents.evaluation_test_cases.retrieve( - test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + test_case_uuid="test_case_uuid", ) assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) @@ -81,7 +80,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_retrieve_with_all_params(self, client: GradientAI) -> None: evaluation_test_case = client.agents.evaluation_test_cases.retrieve( - test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + test_case_uuid="test_case_uuid", evaluation_test_case_version=0, ) assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) @@ -90,7 +89,7 @@ def test_method_retrieve_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_retrieve(self, client: GradientAI) -> None: response = client.agents.evaluation_test_cases.with_raw_response.retrieve( - test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + test_case_uuid="test_case_uuid", ) assert response.is_closed is True @@ -102,7 +101,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @parametrize def test_streaming_response_retrieve(self, client: GradientAI) -> None: with client.agents.evaluation_test_cases.with_streaming_response.retrieve( - test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + test_case_uuid="test_case_uuid", ) as response: assert not response.is_closed 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -124,7 +123,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: evaluation_test_case = client.agents.evaluation_test_cases.update( - path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_test_case_uuid="test_case_uuid", ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) @@ -132,18 +131,17 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: evaluation_test_case = client.agents.evaluation_test_cases.update( - path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', - dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"', - description='"example string"', - metrics={"metric_uuids": ["example string"]}, - name='"example name"', + path_test_case_uuid="test_case_uuid", + dataset_uuid="dataset_uuid", + description="description", + metrics={"metric_uuids": ["string"]}, + name="name", star_metric={ - "metric_uuid": '"123e4567-e89b-12d3-a456-426614174000"', - "name": '"example name"', - "success_threshold": 123, - "success_threshold_pct": 123, + "metric_uuid": "metric_uuid", + "name": "name", + "success_threshold_pct": 0, }, - body_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_test_case_uuid="test_case_uuid", ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) @@ -151,7 +149,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.evaluation_test_cases.with_raw_response.update( - path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_test_case_uuid="test_case_uuid", ) assert response.is_closed is True @@ -163,7 +161,7 @@ def test_raw_response_update(self, 
client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.evaluation_test_cases.with_streaming_response.update( - path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_test_case_uuid="test_case_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -213,7 +211,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @parametrize def test_method_list_evaluation_runs(self, client: GradientAI) -> None: evaluation_test_case = client.agents.evaluation_test_cases.list_evaluation_runs( - evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + evaluation_test_case_uuid="evaluation_test_case_uuid", ) assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) @@ -221,7 +219,7 @@ def test_method_list_evaluation_runs(self, client: GradientAI) -> None: @parametrize def test_method_list_evaluation_runs_with_all_params(self, client: GradientAI) -> None: evaluation_test_case = client.agents.evaluation_test_cases.list_evaluation_runs( - evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + evaluation_test_case_uuid="evaluation_test_case_uuid", evaluation_test_case_version=0, ) assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) @@ -230,7 +228,7 @@ def test_method_list_evaluation_runs_with_all_params(self, client: GradientAI) - @parametrize def test_raw_response_list_evaluation_runs(self, client: GradientAI) -> None: response = client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs( - evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + evaluation_test_case_uuid="evaluation_test_case_uuid", ) assert response.is_closed is True @@ -242,7 +240,7 @@ def test_raw_response_list_evaluation_runs(self, client: GradientAI) -> None: @parametrize def 
test_streaming_response_list_evaluation_runs(self, client: GradientAI) -> None: with client.agents.evaluation_test_cases.with_streaming_response.list_evaluation_runs( - evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + evaluation_test_case_uuid="evaluation_test_case_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -278,17 +276,16 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.create( - dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"', - description='"example string"', - metrics=["example string"], - name='"example name"', + dataset_uuid="dataset_uuid", + description="description", + metrics=["string"], + name="name", star_metric={ - "metric_uuid": '"123e4567-e89b-12d3-a456-426614174000"', - "name": '"example name"', - "success_threshold": 123, - "success_threshold_pct": 123, + "metric_uuid": "metric_uuid", + "name": "name", + "success_threshold_pct": 0, }, - workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + workspace_uuid="workspace_uuid", ) assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) @@ -318,7 +315,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve( - test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + test_case_uuid="test_case_uuid", ) assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) @@ -326,7 +323,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_retrieve_with_all_params(self, 
async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve( - test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + test_case_uuid="test_case_uuid", evaluation_test_case_version=0, ) assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) @@ -335,7 +332,7 @@ async def test_method_retrieve_with_all_params(self, async_client: AsyncGradient @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_test_cases.with_raw_response.retrieve( - test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + test_case_uuid="test_case_uuid", ) assert response.is_closed is True @@ -347,7 +344,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_test_cases.with_streaming_response.retrieve( - test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + test_case_uuid="test_case_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -369,7 +366,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.update( - path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_test_case_uuid="test_case_uuid", ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) @@ -377,18 +374,17 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await 
async_client.agents.evaluation_test_cases.update( - path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', - dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"', - description='"example string"', - metrics={"metric_uuids": ["example string"]}, - name='"example name"', + path_test_case_uuid="test_case_uuid", + dataset_uuid="dataset_uuid", + description="description", + metrics={"metric_uuids": ["string"]}, + name="name", star_metric={ - "metric_uuid": '"123e4567-e89b-12d3-a456-426614174000"', - "name": '"example name"', - "success_threshold": 123, - "success_threshold_pct": 123, + "metric_uuid": "metric_uuid", + "name": "name", + "success_threshold_pct": 0, }, - body_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_test_case_uuid="test_case_uuid", ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) @@ -396,7 +392,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_test_cases.with_raw_response.update( - path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_test_case_uuid="test_case_uuid", ) assert response.is_closed is True @@ -408,7 +404,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_test_cases.with_streaming_response.update( - path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_test_case_uuid="test_case_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -458,7 +454,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @parametrize async def test_method_list_evaluation_runs(self, async_client: AsyncGradientAI) 
-> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.list_evaluation_runs( - evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + evaluation_test_case_uuid="evaluation_test_case_uuid", ) assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) @@ -466,7 +462,7 @@ async def test_method_list_evaluation_runs(self, async_client: AsyncGradientAI) @parametrize async def test_method_list_evaluation_runs_with_all_params(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.list_evaluation_runs( - evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + evaluation_test_case_uuid="evaluation_test_case_uuid", evaluation_test_case_version=0, ) assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) @@ -475,7 +471,7 @@ async def test_method_list_evaluation_runs_with_all_params(self, async_client: A @parametrize async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs( - evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + evaluation_test_case_uuid="evaluation_test_case_uuid", ) assert response.is_closed is True @@ -487,7 +483,7 @@ async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradie @parametrize async def test_streaming_response_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_test_cases.with_streaming_response.list_evaluation_runs( - evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + evaluation_test_case_uuid="evaluation_test_case_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git 
a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py index 4390d1d2..5a3693cb 100644 --- a/tests/api_resources/agents/test_functions.py +++ b/tests/api_resources/agents/test_functions.py @@ -25,7 +25,7 @@ class TestFunctions: @parametrize def test_method_create(self, client: GradientAI) -> None: function = client.agents.functions.create( - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid="agent_uuid", ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) @@ -33,12 +33,12 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: function = client.agents.functions.create( - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_agent_uuid='"12345678-1234-1234-1234-123456789012"', - description='"My Function Description"', - faas_name='"my-function"', - faas_namespace='"default"', - function_name='"My Function"', + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + description="description", + faas_name="faas_name", + faas_namespace="faas_namespace", + function_name="function_name", input_schema={}, output_schema={}, ) @@ -48,7 +48,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_create(self, client: GradientAI) -> None: response = client.agents.functions.with_raw_response.create( - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -60,7 +60,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: with client.agents.functions.with_streaming_response.create( - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -82,8 +82,8 @@ def test_path_params_create(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: function = client.agents.functions.update( - path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", ) assert_matches_type(FunctionUpdateResponse, function, path=["response"]) @@ -91,14 +91,14 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: function = client.agents.functions.update( - path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_agent_uuid='"12345678-1234-1234-1234-123456789012"', - description='"My Function Description"', - faas_name='"my-function"', - faas_namespace='"default"', - function_name='"My Function"', - body_function_uuid='"12345678-1234-1234-1234-123456789012"', + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + description="description", + faas_name="faas_name", + faas_namespace="faas_namespace", + function_name="function_name", + body_function_uuid="function_uuid", input_schema={}, output_schema={}, ) @@ -108,8 +108,8 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.functions.with_raw_response.update( - path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -121,8 +121,8 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def 
test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.functions.with_streaming_response.update( - path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -137,22 +137,22 @@ def test_streaming_response_update(self, client: GradientAI) -> None: def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): client.agents.functions.with_raw_response.update( - path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_function_uuid="function_uuid", path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): client.agents.functions.with_raw_response.update( path_function_uuid="", - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid="agent_uuid", ) @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: function = client.agents.functions.delete( - function_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + function_uuid="function_uuid", + agent_uuid="agent_uuid", ) assert_matches_type(FunctionDeleteResponse, function, path=["response"]) @@ -160,8 +160,8 @@ def test_method_delete(self, client: GradientAI) -> None: @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: response = client.agents.functions.with_raw_response.delete( - function_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + function_uuid="function_uuid", + agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -173,8 +173,8 @@ def 
test_raw_response_delete(self, client: GradientAI) -> None: @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: with client.agents.functions.with_streaming_response.delete( - function_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + function_uuid="function_uuid", + agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -189,14 +189,14 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): client.agents.functions.with_raw_response.delete( - function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + function_uuid="function_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): client.agents.functions.with_raw_response.delete( function_uuid="", - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", ) @@ -209,7 +209,7 @@ class TestAsyncFunctions: @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: function = await async_client.agents.functions.create( - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid="agent_uuid", ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) @@ -217,12 +217,12 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: function = await async_client.agents.functions.create( - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_agent_uuid='"12345678-1234-1234-1234-123456789012"', - description='"My Function Description"', - faas_name='"my-function"', - 
faas_namespace='"default"', - function_name='"My Function"', + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + description="description", + faas_name="faas_name", + faas_namespace="faas_namespace", + function_name="function_name", input_schema={}, output_schema={}, ) @@ -232,7 +232,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.functions.with_raw_response.create( - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -244,7 +244,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.functions.with_streaming_response.create( - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -266,8 +266,8 @@ async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: function = await async_client.agents.functions.update( - path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", ) assert_matches_type(FunctionUpdateResponse, function, path=["response"]) @@ -275,14 +275,14 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: function = await async_client.agents.functions.update( - path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', - 
path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_agent_uuid='"12345678-1234-1234-1234-123456789012"', - description='"My Function Description"', - faas_name='"my-function"', - faas_namespace='"default"', - function_name='"My Function"', - body_function_uuid='"12345678-1234-1234-1234-123456789012"', + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", + body_agent_uuid="agent_uuid", + description="description", + faas_name="faas_name", + faas_namespace="faas_namespace", + function_name="function_name", + body_function_uuid="function_uuid", input_schema={}, output_schema={}, ) @@ -292,8 +292,8 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.functions.with_raw_response.update( - path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -305,8 +305,8 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.functions.with_streaming_response.update( - path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_function_uuid="function_uuid", + path_agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -321,22 +321,22 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): await 
async_client.agents.functions.with_raw_response.update( - path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_function_uuid="function_uuid", path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): await async_client.agents.functions.with_raw_response.update( path_function_uuid="", - path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid="agent_uuid", ) @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: function = await async_client.agents.functions.delete( - function_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + function_uuid="function_uuid", + agent_uuid="agent_uuid", ) assert_matches_type(FunctionDeleteResponse, function, path=["response"]) @@ -344,8 +344,8 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.functions.with_raw_response.delete( - function_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + function_uuid="function_uuid", + agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -357,8 +357,8 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.functions.with_streaming_response.delete( - function_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + function_uuid="function_uuid", + agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -373,12 +373,12 @@ async def test_streaming_response_delete(self, async_client: 
AsyncGradientAI) -> async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): await async_client.agents.functions.with_raw_response.delete( - function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + function_uuid="function_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): await async_client.agents.functions.with_raw_response.delete( function_uuid="", - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", ) diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py index 2ac20d89..e62c05ff 100644 --- a/tests/api_resources/agents/test_knowledge_bases.py +++ b/tests/api_resources/agents/test_knowledge_bases.py @@ -63,8 +63,8 @@ def test_path_params_attach(self, client: GradientAI) -> None: @parametrize def test_method_attach_single(self, client: GradientAI) -> None: knowledge_base = client.agents.knowledge_bases.attach_single( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) @@ -72,8 +72,8 @@ def test_method_attach_single(self, client: GradientAI) -> None: @parametrize def test_raw_response_attach_single(self, client: GradientAI) -> None: response = client.agents.knowledge_bases.with_raw_response.attach_single( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -85,8 +85,8 @@ def test_raw_response_attach_single(self, client: GradientAI) -> None: @parametrize def 
test_streaming_response_attach_single(self, client: GradientAI) -> None: with client.agents.knowledge_bases.with_streaming_response.attach_single( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -101,22 +101,22 @@ def test_streaming_response_attach_single(self, client: GradientAI) -> None: def test_path_params_attach_single(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): client.agents.knowledge_bases.with_raw_response.attach_single( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="", - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", ) @pytest.mark.skip() @parametrize def test_method_detach(self, client: GradientAI) -> None: knowledge_base = client.agents.knowledge_bases.detach( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", ) assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) @@ -124,8 +124,8 @@ def test_method_detach(self, client: GradientAI) -> None: @parametrize def test_raw_response_detach(self, client: GradientAI) -> None: response = client.agents.knowledge_bases.with_raw_response.detach( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + 
knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -137,8 +137,8 @@ def test_raw_response_detach(self, client: GradientAI) -> None: @parametrize def test_streaming_response_detach(self, client: GradientAI) -> None: with client.agents.knowledge_bases.with_streaming_response.detach( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -153,14 +153,14 @@ def test_streaming_response_detach(self, client: GradientAI) -> None: def test_path_params_detach(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): client.agents.knowledge_bases.with_raw_response.detach( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="", - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", ) @@ -215,8 +215,8 @@ async def test_path_params_attach(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_attach_single(self, async_client: AsyncGradientAI) -> None: knowledge_base = await async_client.agents.knowledge_bases.attach_single( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) @@ -224,8 +224,8 @@ async def test_method_attach_single(self, async_client: 
AsyncGradientAI) -> None @parametrize async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.knowledge_bases.with_raw_response.attach_single( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -237,8 +237,8 @@ async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) - @parametrize async def test_streaming_response_attach_single(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.knowledge_bases.with_streaming_response.attach_single( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -253,22 +253,22 @@ async def test_streaming_response_attach_single(self, async_client: AsyncGradien async def test_path_params_attach_single(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): await async_client.agents.knowledge_bases.with_raw_response.attach_single( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): await async_client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="", - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", ) @pytest.mark.skip() @parametrize async def test_method_detach(self, async_client: AsyncGradientAI) -> None: knowledge_base = await 
async_client.agents.knowledge_bases.detach( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", ) assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) @@ -276,8 +276,8 @@ async def test_method_detach(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.knowledge_bases.with_raw_response.detach( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", ) assert response.is_closed is True @@ -289,8 +289,8 @@ async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_detach(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.knowledge_bases.with_streaming_response.detach( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", + agent_uuid="agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -305,12 +305,12 @@ async def test_streaming_response_detach(self, async_client: AsyncGradientAI) -> async def test_path_params_detach(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): await async_client.agents.knowledge_bases.with_raw_response.detach( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): 
await async_client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="", - agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid="agent_uuid", ) diff --git a/tests/api_resources/agents/test_routes.py b/tests/api_resources/agents/test_routes.py index d04e8c90..2e6dfd7b 100644 --- a/tests/api_resources/agents/test_routes.py +++ b/tests/api_resources/agents/test_routes.py @@ -26,8 +26,8 @@ class TestRoutes: @parametrize def test_method_update(self, client: GradientAI) -> None: route = client.agents.routes.update( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", ) assert_matches_type(RouteUpdateResponse, route, path=["response"]) @@ -35,13 +35,13 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: route = client.agents.routes.update( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_child_agent_uuid='"12345678-1234-1234-1234-123456789012"', - if_case='"use this to get weather information"', - body_parent_agent_uuid='"12345678-1234-1234-1234-123456789012"', - route_name='"weather_route"', - uuid='"12345678-1234-1234-1234-123456789012"', + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + body_child_agent_uuid="child_agent_uuid", + if_case="if_case", + body_parent_agent_uuid="parent_agent_uuid", + route_name="route_name", + uuid="uuid", ) assert_matches_type(RouteUpdateResponse, route, path=["response"]) @@ -49,8 +49,8 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.routes.with_raw_response.update( - 
path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", ) assert response.is_closed is True @@ -62,8 +62,8 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.routes.with_streaming_response.update( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -80,22 +80,22 @@ def test_path_params_update(self, client: GradientAI) -> None: ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): client.agents.routes.with_raw_response.update( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): client.agents.routes.with_raw_response.update( path_child_agent_uuid="", - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid="parent_agent_uuid", ) @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: route = client.agents.routes.delete( - child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", ) assert_matches_type(RouteDeleteResponse, route, path=["response"]) @@ -103,8 +103,8 @@ def test_method_delete(self, client: GradientAI) -> None: @parametrize def 
test_raw_response_delete(self, client: GradientAI) -> None: response = client.agents.routes.with_raw_response.delete( - child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", ) assert response.is_closed is True @@ -116,8 +116,8 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: with client.agents.routes.with_streaming_response.delete( - child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -132,22 +132,22 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): client.agents.routes.with_raw_response.delete( - child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + child_agent_uuid="child_agent_uuid", parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): client.agents.routes.with_raw_response.delete( child_agent_uuid="", - parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + parent_agent_uuid="parent_agent_uuid", ) @pytest.mark.skip() @parametrize def test_method_add(self, client: GradientAI) -> None: route = client.agents.routes.add( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", ) 
assert_matches_type(RouteAddResponse, route, path=["response"]) @@ -155,12 +155,12 @@ def test_method_add(self, client: GradientAI) -> None: @parametrize def test_method_add_with_all_params(self, client: GradientAI) -> None: route = client.agents.routes.add( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_child_agent_uuid='"12345678-1234-1234-1234-123456789012"', - if_case='"use this to get weather information"', - body_parent_agent_uuid='"12345678-1234-1234-1234-123456789012"', - route_name='"weather_route"', + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + body_child_agent_uuid="child_agent_uuid", + if_case="if_case", + body_parent_agent_uuid="parent_agent_uuid", + route_name="route_name", ) assert_matches_type(RouteAddResponse, route, path=["response"]) @@ -168,8 +168,8 @@ def test_method_add_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_add(self, client: GradientAI) -> None: response = client.agents.routes.with_raw_response.add( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", ) assert response.is_closed is True @@ -181,8 +181,8 @@ def test_raw_response_add(self, client: GradientAI) -> None: @parametrize def test_streaming_response_add(self, client: GradientAI) -> None: with client.agents.routes.with_streaming_response.add( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -199,14 +199,14 @@ def test_path_params_add(self, client: 
GradientAI) -> None: ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): client.agents.routes.with_raw_response.add( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): client.agents.routes.with_raw_response.add( path_child_agent_uuid="", - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid="parent_agent_uuid", ) @pytest.mark.skip() @@ -261,8 +261,8 @@ class TestAsyncRoutes: @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: route = await async_client.agents.routes.update( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", ) assert_matches_type(RouteUpdateResponse, route, path=["response"]) @@ -270,13 +270,13 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: route = await async_client.agents.routes.update( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_child_agent_uuid='"12345678-1234-1234-1234-123456789012"', - if_case='"use this to get weather information"', - body_parent_agent_uuid='"12345678-1234-1234-1234-123456789012"', - route_name='"weather_route"', - uuid='"12345678-1234-1234-1234-123456789012"', + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + body_child_agent_uuid="child_agent_uuid", + if_case="if_case", + body_parent_agent_uuid="parent_agent_uuid", + route_name="route_name", + uuid="uuid", ) 
assert_matches_type(RouteUpdateResponse, route, path=["response"]) @@ -284,8 +284,8 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.routes.with_raw_response.update( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", ) assert response.is_closed is True @@ -297,8 +297,8 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.routes.with_streaming_response.update( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -315,22 +315,22 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): await async_client.agents.routes.with_raw_response.update( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): await async_client.agents.routes.with_raw_response.update( path_child_agent_uuid="", - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid="parent_agent_uuid", ) @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: 
AsyncGradientAI) -> None: route = await async_client.agents.routes.delete( - child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", ) assert_matches_type(RouteDeleteResponse, route, path=["response"]) @@ -338,8 +338,8 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.routes.with_raw_response.delete( - child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", ) assert response.is_closed is True @@ -351,8 +351,8 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.routes.with_streaming_response.delete( - child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + child_agent_uuid="child_agent_uuid", + parent_agent_uuid="parent_agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -367,22 +367,22 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): await async_client.agents.routes.with_raw_response.delete( - child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + child_agent_uuid="child_agent_uuid", parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for 
`child_agent_uuid` but received ''"): await async_client.agents.routes.with_raw_response.delete( child_agent_uuid="", - parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + parent_agent_uuid="parent_agent_uuid", ) @pytest.mark.skip() @parametrize async def test_method_add(self, async_client: AsyncGradientAI) -> None: route = await async_client.agents.routes.add( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", ) assert_matches_type(RouteAddResponse, route, path=["response"]) @@ -390,12 +390,12 @@ async def test_method_add(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) -> None: route = await async_client.agents.routes.add( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_child_agent_uuid='"12345678-1234-1234-1234-123456789012"', - if_case='"use this to get weather information"', - body_parent_agent_uuid='"12345678-1234-1234-1234-123456789012"', - route_name='"weather_route"', + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", + body_child_agent_uuid="child_agent_uuid", + if_case="if_case", + body_parent_agent_uuid="parent_agent_uuid", + route_name="route_name", ) assert_matches_type(RouteAddResponse, route, path=["response"]) @@ -403,8 +403,8 @@ async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) - @parametrize async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.routes.with_raw_response.add( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_child_agent_uuid="child_agent_uuid", + 
path_parent_agent_uuid="parent_agent_uuid", ) assert response.is_closed is True @@ -416,8 +416,8 @@ async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.routes.with_streaming_response.add( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_child_agent_uuid="child_agent_uuid", + path_parent_agent_uuid="parent_agent_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -434,14 +434,14 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): await async_client.agents.routes.with_raw_response.add( - path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_child_agent_uuid="child_agent_uuid", path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): await async_client.agents.routes.with_raw_response.add( path_child_agent_uuid="", - path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid="parent_agent_uuid", ) @pytest.mark.skip() diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py index d6151470..79f73672 100644 --- a/tests/api_resources/agents/test_versions.py +++ b/tests/api_resources/agents/test_versions.py @@ -24,7 +24,7 @@ class TestVersions: @parametrize def test_method_update(self, client: GradientAI) -> None: version = client.agents.versions.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -32,9 +32,9 @@ def test_method_update(self, client: 
GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: version = client.agents.versions.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_uuid='"12345678-1234-1234-1234-123456789012"', - version_hash="c3658d8b5c05494cd03ce042926ef08157889ed54b1b74b5ee0b3d66dcee4b73", + path_uuid="uuid", + body_uuid="uuid", + version_hash="version_hash", ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -42,7 +42,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.versions.with_raw_response.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert response.is_closed is True @@ -54,7 +54,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.versions.with_streaming_response.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -76,7 +76,7 @@ def test_path_params_update(self, client: GradientAI) -> None: @parametrize def test_method_list(self, client: GradientAI) -> None: version = client.agents.versions.list( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) assert_matches_type(VersionListResponse, version, path=["response"]) @@ -84,7 +84,7 @@ def test_method_list(self, client: GradientAI) -> None: @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: version = client.agents.versions.list( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", page=0, per_page=0, ) @@ -94,7 +94,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_list(self, client: 
GradientAI) -> None: response = client.agents.versions.with_raw_response.list( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) assert response.is_closed is True @@ -106,7 +106,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: with client.agents.versions.with_streaming_response.list( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -134,7 +134,7 @@ class TestAsyncVersions: @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: version = await async_client.agents.versions.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -142,9 +142,9 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: version = await async_client.agents.versions.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_uuid='"12345678-1234-1234-1234-123456789012"', - version_hash="c3658d8b5c05494cd03ce042926ef08157889ed54b1b74b5ee0b3d66dcee4b73", + path_uuid="uuid", + body_uuid="uuid", + version_hash="version_hash", ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -152,7 +152,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.versions.with_raw_response.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert response.is_closed is True @@ -164,7 +164,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async 
def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.versions.with_streaming_response.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -186,7 +186,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: version = await async_client.agents.versions.list( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) assert_matches_type(VersionListResponse, version, path=["response"]) @@ -194,7 +194,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: version = await async_client.agents.versions.list( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", page=0, per_page=0, ) @@ -204,7 +204,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.versions.with_raw_response.list( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) assert response.is_closed is True @@ -216,7 +216,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.versions.with_streaming_response.list( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 46c8b431..25b8419a 100644 
--- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -54,17 +54,6 @@ def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> N stream=False, stream_options={"include_usage": True}, temperature=1, - tool_choice="none", - tools=[ - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - }, - "type": "function", - } - ], top_logprobs=0, top_p=1, user="user-1234", @@ -147,17 +136,6 @@ def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> N stop="\n", stream_options={"include_usage": True}, temperature=1, - tool_choice="none", - tools=[ - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - }, - "type": "function", - } - ], top_logprobs=0, top_p=1, user="user-1234", @@ -246,17 +224,6 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn stream=False, stream_options={"include_usage": True}, temperature=1, - tool_choice="none", - tools=[ - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - }, - "type": "function", - } - ], top_logprobs=0, top_p=1, user="user-1234", @@ -339,17 +306,6 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn stop="\n", stream_options={"include_usage": True}, temperature=1, - tool_choice="none", - tools=[ - { - "function": { - "name": "name", - "description": "description", - "parameters": {"foo": "bar"}, - }, - "type": "function", - } - ], top_logprobs=0, top_p=1, user="user-1234", diff --git a/tests/api_resources/droplets/__init__.py b/tests/api_resources/droplets/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/droplets/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
diff --git a/tests/api_resources/droplets/test_actions.py b/tests/api_resources/droplets/test_actions.py deleted file mode 100644 index 33ecb60c..00000000 --- a/tests/api_resources/droplets/test_actions.py +++ /dev/null @@ -1,1209 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types.droplets import ( - ActionListResponse, - ActionInitiateResponse, - ActionRetrieveResponse, - ActionBulkInitiateResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestActions: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - action = client.droplets.actions.retrieve( - action_id=36804636, - droplet_id=3164444, - ) - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.droplets.actions.with_raw_response.retrieve( - action_id=36804636, - droplet_id=3164444, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.droplets.actions.with_streaming_response.retrieve( - action_id=36804636, - droplet_id=3164444, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionRetrieveResponse, 
action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - action = client.droplets.actions.list( - droplet_id=3164444, - ) - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - action = client.droplets.actions.list( - droplet_id=3164444, - page=1, - per_page=1, - ) - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.droplets.actions.with_raw_response.list( - droplet_id=3164444, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.droplets.actions.with_streaming_response.list( - droplet_id=3164444, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionListResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_bulk_initiate_overload_1(self, client: GradientAI) -> None: - action = client.droplets.actions.bulk_initiate( - type="reboot", - ) - assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_bulk_initiate_with_all_params_overload_1(self, client: GradientAI) -> None: - action = client.droplets.actions.bulk_initiate( - type="reboot", - tag_name="tag_name", - ) - assert_matches_type(ActionBulkInitiateResponse, action, 
path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_bulk_initiate_overload_1(self, client: GradientAI) -> None: - response = client.droplets.actions.with_raw_response.bulk_initiate( - type="reboot", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_bulk_initiate_overload_1(self, client: GradientAI) -> None: - with client.droplets.actions.with_streaming_response.bulk_initiate( - type="reboot", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_bulk_initiate_overload_2(self, client: GradientAI) -> None: - action = client.droplets.actions.bulk_initiate( - type="reboot", - ) - assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_bulk_initiate_with_all_params_overload_2(self, client: GradientAI) -> None: - action = client.droplets.actions.bulk_initiate( - type="reboot", - tag_name="tag_name", - name="Nifty New Snapshot", - ) - assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_bulk_initiate_overload_2(self, client: GradientAI) -> None: - response = client.droplets.actions.with_raw_response.bulk_initiate( - type="reboot", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - - 
@pytest.mark.skip() - @parametrize - def test_streaming_response_bulk_initiate_overload_2(self, client: GradientAI) -> None: - with client.droplets.actions.with_streaming_response.bulk_initiate( - type="reboot", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_initiate_overload_1(self, client: GradientAI) -> None: - action = client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_initiate_overload_1(self, client: GradientAI) -> None: - response = client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="reboot", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_initiate_overload_1(self, client: GradientAI) -> None: - with client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="reboot", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_initiate_overload_2(self, client: GradientAI) -> None: - action = client.droplets.actions.initiate( - droplet_id=3164444, - type="enable_backups", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - 
@pytest.mark.skip() - @parametrize - def test_method_initiate_with_all_params_overload_2(self, client: GradientAI) -> None: - action = client.droplets.actions.initiate( - droplet_id=3164444, - type="enable_backups", - backup_policy={ - "hour": 20, - "plan": "daily", - "weekday": "SUN", - }, - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_initiate_overload_2(self, client: GradientAI) -> None: - response = client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="enable_backups", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_initiate_overload_2(self, client: GradientAI) -> None: - with client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="enable_backups", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_initiate_overload_3(self, client: GradientAI) -> None: - action = client.droplets.actions.initiate( - droplet_id=3164444, - type="enable_backups", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_initiate_with_all_params_overload_3(self, client: GradientAI) -> None: - action = client.droplets.actions.initiate( - droplet_id=3164444, - type="enable_backups", - backup_policy={ - "hour": 20, - "plan": "weekly", - "weekday": "SUN", - }, - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - 
@pytest.mark.skip() - @parametrize - def test_raw_response_initiate_overload_3(self, client: GradientAI) -> None: - response = client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="enable_backups", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_initiate_overload_3(self, client: GradientAI) -> None: - with client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="enable_backups", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_initiate_overload_4(self, client: GradientAI) -> None: - action = client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_initiate_with_all_params_overload_4(self, client: GradientAI) -> None: - action = client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - image=12389723, - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_initiate_overload_4(self, client: GradientAI) -> None: - response = client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="reboot", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() 
- @parametrize - def test_streaming_response_initiate_overload_4(self, client: GradientAI) -> None: - with client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="reboot", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_initiate_overload_5(self, client: GradientAI) -> None: - action = client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_initiate_with_all_params_overload_5(self, client: GradientAI) -> None: - action = client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - disk=True, - size="s-2vcpu-2gb", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_initiate_overload_5(self, client: GradientAI) -> None: - response = client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="reboot", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_initiate_overload_5(self, client: GradientAI) -> None: - with client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="reboot", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - assert cast(Any, 
response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_initiate_overload_6(self, client: GradientAI) -> None: - action = client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_initiate_with_all_params_overload_6(self, client: GradientAI) -> None: - action = client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - image="ubuntu-20-04-x64", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_initiate_overload_6(self, client: GradientAI) -> None: - response = client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="reboot", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_initiate_overload_6(self, client: GradientAI) -> None: - with client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="reboot", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_initiate_overload_7(self, client: GradientAI) -> None: - action = client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_initiate_with_all_params_overload_7(self, client: GradientAI) -> None: - action = 
client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - name="nifty-new-name", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_initiate_overload_7(self, client: GradientAI) -> None: - response = client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="reboot", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_initiate_overload_7(self, client: GradientAI) -> None: - with client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="reboot", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_initiate_overload_8(self, client: GradientAI) -> None: - action = client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_initiate_with_all_params_overload_8(self, client: GradientAI) -> None: - action = client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - kernel=12389723, - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_initiate_overload_8(self, client: GradientAI) -> None: - response = client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="reboot", - ) - - assert response.is_closed is True - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_initiate_overload_8(self, client: GradientAI) -> None: - with client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="reboot", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_initiate_overload_9(self, client: GradientAI) -> None: - action = client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_initiate_with_all_params_overload_9(self, client: GradientAI) -> None: - action = client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - name="Nifty New Snapshot", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_initiate_overload_9(self, client: GradientAI) -> None: - response = client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="reboot", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_initiate_overload_9(self, client: GradientAI) -> None: - with client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="reboot", - ) as response: - assert not response.is_closed - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncActions: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.retrieve( - action_id=36804636, - droplet_id=3164444, - ) - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.actions.with_raw_response.retrieve( - action_id=36804636, - droplet_id=3164444, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.actions.with_streaming_response.retrieve( - action_id=36804636, - droplet_id=3164444, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.list( - droplet_id=3164444, - ) - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - 
@parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.list( - droplet_id=3164444, - page=1, - per_page=1, - ) - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.actions.with_raw_response.list( - droplet_id=3164444, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.actions.with_streaming_response.list( - droplet_id=3164444, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionListResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_bulk_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.bulk_initiate( - type="reboot", - ) - assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_bulk_initiate_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.bulk_initiate( - type="reboot", - tag_name="tag_name", - ) - assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_bulk_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: - response = await 
async_client.droplets.actions.with_raw_response.bulk_initiate( - type="reboot", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_bulk_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.actions.with_streaming_response.bulk_initiate( - type="reboot", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_bulk_initiate_overload_2(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.bulk_initiate( - type="reboot", - ) - assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_bulk_initiate_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.bulk_initiate( - type="reboot", - tag_name="tag_name", - name="Nifty New Snapshot", - ) - assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_bulk_initiate_overload_2(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.actions.with_raw_response.bulk_initiate( - type="reboot", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - 
@parametrize - async def test_streaming_response_bulk_initiate_overload_2(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.actions.with_streaming_response.bulk_initiate( - type="reboot", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="reboot", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="reboot", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_overload_2(self, async_client: AsyncGradientAI) -> None: - action = await 
async_client.droplets.actions.initiate( - droplet_id=3164444, - type="enable_backups", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.initiate( - droplet_id=3164444, - type="enable_backups", - backup_policy={ - "hour": 20, - "plan": "daily", - "weekday": "SUN", - }, - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_initiate_overload_2(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="enable_backups", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_initiate_overload_2(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="enable_backups", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_overload_3(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.initiate( - droplet_id=3164444, - type="enable_backups", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def 
test_method_initiate_with_all_params_overload_3(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.initiate( - droplet_id=3164444, - type="enable_backups", - backup_policy={ - "hour": 20, - "plan": "weekly", - "weekday": "SUN", - }, - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_initiate_overload_3(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="enable_backups", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_initiate_overload_3(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="enable_backups", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_overload_4(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_with_all_params_overload_4(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - image=12389723, - ) - assert_matches_type(ActionInitiateResponse, 
action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_initiate_overload_4(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="reboot", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_initiate_overload_4(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="reboot", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_overload_5(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_with_all_params_overload_5(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - disk=True, - size="s-2vcpu-2gb", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_initiate_overload_5(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="reboot", - ) - - assert response.is_closed is True - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_initiate_overload_5(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="reboot", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_overload_6(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_with_all_params_overload_6(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - image="ubuntu-20-04-x64", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_initiate_overload_6(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="reboot", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_initiate_overload_6(self, async_client: AsyncGradientAI) -> None: - async with 
async_client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="reboot", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_overload_7(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_with_all_params_overload_7(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - name="nifty-new-name", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_initiate_overload_7(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="reboot", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_initiate_overload_7(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="reboot", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - 
assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_overload_8(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_with_all_params_overload_8(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - kernel=12389723, - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_initiate_overload_8(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="reboot", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_initiate_overload_8(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="reboot", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_overload_9(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) 
- - @pytest.mark.skip() - @parametrize - async def test_method_initiate_with_all_params_overload_9(self, async_client: AsyncGradientAI) -> None: - action = await async_client.droplets.actions.initiate( - droplet_id=3164444, - type="reboot", - name="Nifty New Snapshot", - ) - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_initiate_overload_9(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.actions.with_raw_response.initiate( - droplet_id=3164444, - type="reboot", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_initiate_overload_9(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.actions.with_streaming_response.initiate( - droplet_id=3164444, - type="reboot", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionInitiateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/droplets/test_autoscale.py b/tests/api_resources/droplets/test_autoscale.py deleted file mode 100644 index c1865864..00000000 --- a/tests/api_resources/droplets/test_autoscale.py +++ /dev/null @@ -1,953 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types.droplets import ( - AutoscaleListResponse, - AutoscaleCreateResponse, - AutoscaleUpdateResponse, - AutoscaleRetrieveResponse, - AutoscaleListHistoryResponse, - AutoscaleListMembersResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestAutoscale: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: GradientAI) -> None: - autoscale = client.droplets.autoscale.create( - config={ - "max_instances": 5, - "min_instances": 1, - }, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - }, - name="my-autoscale-pool", - ) - assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: - autoscale = client.droplets.autoscale.create( - config={ - "max_instances": 5, - "min_instances": 1, - "cooldown_minutes": 10, - "target_cpu_utilization": 0.5, - "target_memory_utilization": 0.6, - }, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - "ipv6": True, - "name": "example.com", - "project_id": "746c6152-2fa2-11ed-92d3-27aaa54e4988", - "tags": ["env:prod", "web"], - "user_data": "#cloud-config\nruncmd:\n - touch /test.txt\n", - "vpc_uuid": "760e09ef-dc84-11e8-981e-3cfdfeaae000", - "with_droplet_agent": True, - }, - name="my-autoscale-pool", - ) - assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - 
def test_raw_response_create(self, client: GradientAI) -> None: - response = client.droplets.autoscale.with_raw_response.create( - config={ - "max_instances": 5, - "min_instances": 1, - }, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - }, - name="my-autoscale-pool", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - autoscale = response.parse() - assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: - with client.droplets.autoscale.with_streaming_response.create( - config={ - "max_instances": 5, - "min_instances": 1, - }, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - }, - name="my-autoscale-pool", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - autoscale = response.parse() - assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - autoscale = client.droplets.autoscale.retrieve( - "autoscale_pool_id", - ) - assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.droplets.autoscale.with_raw_response.retrieve( - "autoscale_pool_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - autoscale = response.parse() - assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"]) - - 
@pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.droplets.autoscale.with_streaming_response.retrieve( - "autoscale_pool_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - autoscale = response.parse() - assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): - client.droplets.autoscale.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: GradientAI) -> None: - autoscale = client.droplets.autoscale.update( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - config={"target_number_instances": 2}, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - }, - name="my-autoscale-pool", - ) - assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: - autoscale = client.droplets.autoscale.update( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - config={"target_number_instances": 2}, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - "ipv6": True, - "name": "example.com", - "project_id": "746c6152-2fa2-11ed-92d3-27aaa54e4988", - "tags": ["env:prod", "web"], - "user_data": "#cloud-config\nruncmd:\n - touch /test.txt\n", - "vpc_uuid": "760e09ef-dc84-11e8-981e-3cfdfeaae000", - "with_droplet_agent": True, - }, - 
name="my-autoscale-pool", - ) - assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: - response = client.droplets.autoscale.with_raw_response.update( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - config={"target_number_instances": 2}, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - }, - name="my-autoscale-pool", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - autoscale = response.parse() - assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: - with client.droplets.autoscale.with_streaming_response.update( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - config={"target_number_instances": 2}, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - }, - name="my-autoscale-pool", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - autoscale = response.parse() - assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): - client.droplets.autoscale.with_raw_response.update( - autoscale_pool_id="", - config={"target_number_instances": 2}, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": 
["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - }, - name="my-autoscale-pool", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - autoscale = client.droplets.autoscale.list() - assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - autoscale = client.droplets.autoscale.list( - name="name", - page=1, - per_page=1, - ) - assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.droplets.autoscale.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - autoscale = response.parse() - assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.droplets.autoscale.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - autoscale = response.parse() - assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: GradientAI) -> None: - autoscale = client.droplets.autoscale.delete( - "autoscale_pool_id", - ) - assert autoscale is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.droplets.autoscale.with_raw_response.delete( - "autoscale_pool_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - autoscale = response.parse() - assert autoscale is None - - 
@pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.droplets.autoscale.with_streaming_response.delete( - "autoscale_pool_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - autoscale = response.parse() - assert autoscale is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): - client.droplets.autoscale.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_delete_dangerous(self, client: GradientAI) -> None: - autoscale = client.droplets.autoscale.delete_dangerous( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - x_dangerous=True, - ) - assert autoscale is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete_dangerous(self, client: GradientAI) -> None: - response = client.droplets.autoscale.with_raw_response.delete_dangerous( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - x_dangerous=True, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - autoscale = response.parse() - assert autoscale is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete_dangerous(self, client: GradientAI) -> None: - with client.droplets.autoscale.with_streaming_response.delete_dangerous( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - x_dangerous=True, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - autoscale = response.parse() - assert autoscale is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def 
test_path_params_delete_dangerous(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): - client.droplets.autoscale.with_raw_response.delete_dangerous( - autoscale_pool_id="", - x_dangerous=True, - ) - - @pytest.mark.skip() - @parametrize - def test_method_list_history(self, client: GradientAI) -> None: - autoscale = client.droplets.autoscale.list_history( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - ) - assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_history_with_all_params(self, client: GradientAI) -> None: - autoscale = client.droplets.autoscale.list_history( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - page=1, - per_page=1, - ) - assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list_history(self, client: GradientAI) -> None: - response = client.droplets.autoscale.with_raw_response.list_history( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - autoscale = response.parse() - assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list_history(self, client: GradientAI) -> None: - with client.droplets.autoscale.with_streaming_response.list_history( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - autoscale = response.parse() - assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def 
test_path_params_list_history(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): - client.droplets.autoscale.with_raw_response.list_history( - autoscale_pool_id="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list_members(self, client: GradientAI) -> None: - autoscale = client.droplets.autoscale.list_members( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - ) - assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_members_with_all_params(self, client: GradientAI) -> None: - autoscale = client.droplets.autoscale.list_members( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - page=1, - per_page=1, - ) - assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list_members(self, client: GradientAI) -> None: - response = client.droplets.autoscale.with_raw_response.list_members( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - autoscale = response.parse() - assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list_members(self, client: GradientAI) -> None: - with client.droplets.autoscale.with_streaming_response.list_members( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - autoscale = response.parse() - assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def 
test_path_params_list_members(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): - client.droplets.autoscale.with_raw_response.list_members( - autoscale_pool_id="", - ) - - -class TestAsyncAutoscale: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: - autoscale = await async_client.droplets.autoscale.create( - config={ - "max_instances": 5, - "min_instances": 1, - }, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - }, - name="my-autoscale-pool", - ) - assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - autoscale = await async_client.droplets.autoscale.create( - config={ - "max_instances": 5, - "min_instances": 1, - "cooldown_minutes": 10, - "target_cpu_utilization": 0.5, - "target_memory_utilization": 0.6, - }, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - "ipv6": True, - "name": "example.com", - "project_id": "746c6152-2fa2-11ed-92d3-27aaa54e4988", - "tags": ["env:prod", "web"], - "user_data": "#cloud-config\nruncmd:\n - touch /test.txt\n", - "vpc_uuid": "760e09ef-dc84-11e8-981e-3cfdfeaae000", - "with_droplet_agent": True, - }, - name="my-autoscale-pool", - ) - assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await 
async_client.droplets.autoscale.with_raw_response.create( - config={ - "max_instances": 5, - "min_instances": 1, - }, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - }, - name="my-autoscale-pool", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - autoscale = await response.parse() - assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.autoscale.with_streaming_response.create( - config={ - "max_instances": 5, - "min_instances": 1, - }, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - }, - name="my-autoscale-pool", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - autoscale = await response.parse() - assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - autoscale = await async_client.droplets.autoscale.retrieve( - "autoscale_pool_id", - ) - assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.autoscale.with_raw_response.retrieve( - "autoscale_pool_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - autoscale = await response.parse() - assert_matches_type(AutoscaleRetrieveResponse, 
autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.autoscale.with_streaming_response.retrieve( - "autoscale_pool_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - autoscale = await response.parse() - assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): - await async_client.droplets.autoscale.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: - autoscale = await async_client.droplets.autoscale.update( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - config={"target_number_instances": 2}, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - }, - name="my-autoscale-pool", - ) - assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - autoscale = await async_client.droplets.autoscale.update( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - config={"target_number_instances": 2}, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - "ipv6": True, - "name": "example.com", - "project_id": "746c6152-2fa2-11ed-92d3-27aaa54e4988", - "tags": ["env:prod", "web"], - 
"user_data": "#cloud-config\nruncmd:\n - touch /test.txt\n", - "vpc_uuid": "760e09ef-dc84-11e8-981e-3cfdfeaae000", - "with_droplet_agent": True, - }, - name="my-autoscale-pool", - ) - assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.autoscale.with_raw_response.update( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - config={"target_number_instances": 2}, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - }, - name="my-autoscale-pool", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - autoscale = await response.parse() - assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.autoscale.with_streaming_response.update( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - config={"target_number_instances": 2}, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - }, - name="my-autoscale-pool", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - autoscale = await response.parse() - assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): - 
await async_client.droplets.autoscale.with_raw_response.update( - autoscale_pool_id="", - config={"target_number_instances": 2}, - droplet_template={ - "image": "ubuntu-20-04-x64", - "region": "nyc3", - "size": "c-2", - "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - }, - name="my-autoscale-pool", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - autoscale = await async_client.droplets.autoscale.list() - assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - autoscale = await async_client.droplets.autoscale.list( - name="name", - page=1, - per_page=1, - ) - assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.autoscale.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - autoscale = await response.parse() - assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.autoscale.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - autoscale = await response.parse() - assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - autoscale = await async_client.droplets.autoscale.delete( - "autoscale_pool_id", - ) - assert 
autoscale is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.autoscale.with_raw_response.delete( - "autoscale_pool_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - autoscale = await response.parse() - assert autoscale is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.autoscale.with_streaming_response.delete( - "autoscale_pool_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - autoscale = await response.parse() - assert autoscale is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): - await async_client.droplets.autoscale.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_delete_dangerous(self, async_client: AsyncGradientAI) -> None: - autoscale = await async_client.droplets.autoscale.delete_dangerous( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - x_dangerous=True, - ) - assert autoscale is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete_dangerous(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.autoscale.with_raw_response.delete_dangerous( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - x_dangerous=True, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - autoscale = await response.parse() - assert autoscale is None - - 
@pytest.mark.skip() - @parametrize - async def test_streaming_response_delete_dangerous(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.autoscale.with_streaming_response.delete_dangerous( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - x_dangerous=True, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - autoscale = await response.parse() - assert autoscale is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete_dangerous(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): - await async_client.droplets.autoscale.with_raw_response.delete_dangerous( - autoscale_pool_id="", - x_dangerous=True, - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list_history(self, async_client: AsyncGradientAI) -> None: - autoscale = await async_client.droplets.autoscale.list_history( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - ) - assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_history_with_all_params(self, async_client: AsyncGradientAI) -> None: - autoscale = await async_client.droplets.autoscale.list_history( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - page=1, - per_page=1, - ) - assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list_history(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.autoscale.with_raw_response.list_history( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" - autoscale = await response.parse() - assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list_history(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.autoscale.with_streaming_response.list_history( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - autoscale = await response.parse() - assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_list_history(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): - await async_client.droplets.autoscale.with_raw_response.list_history( - autoscale_pool_id="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list_members(self, async_client: AsyncGradientAI) -> None: - autoscale = await async_client.droplets.autoscale.list_members( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - ) - assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_members_with_all_params(self, async_client: AsyncGradientAI) -> None: - autoscale = await async_client.droplets.autoscale.list_members( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - page=1, - per_page=1, - ) - assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list_members(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.autoscale.with_raw_response.list_members( - 
autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - autoscale = await response.parse() - assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list_members(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.autoscale.with_streaming_response.list_members( - autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - autoscale = await response.parse() - assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_list_members(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): - await async_client.droplets.autoscale.with_raw_response.list_members( - autoscale_pool_id="", - ) diff --git a/tests/api_resources/droplets/test_backups.py b/tests/api_resources/droplets/test_backups.py deleted file mode 100644 index f1c18a5f..00000000 --- a/tests/api_resources/droplets/test_backups.py +++ /dev/null @@ -1,315 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types.droplets import ( - BackupListResponse, - BackupListPoliciesResponse, - BackupRetrievePolicyResponse, - BackupListSupportedPoliciesResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestBackups: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - backup = client.droplets.backups.list( - droplet_id=3164444, - ) - assert_matches_type(BackupListResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - backup = client.droplets.backups.list( - droplet_id=3164444, - page=1, - per_page=1, - ) - assert_matches_type(BackupListResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.droplets.backups.with_raw_response.list( - droplet_id=3164444, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - backup = response.parse() - assert_matches_type(BackupListResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.droplets.backups.with_streaming_response.list( - droplet_id=3164444, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - backup = response.parse() - assert_matches_type(BackupListResponse, backup, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def 
test_method_list_policies(self, client: GradientAI) -> None: - backup = client.droplets.backups.list_policies() - assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_policies_with_all_params(self, client: GradientAI) -> None: - backup = client.droplets.backups.list_policies( - page=1, - per_page=1, - ) - assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list_policies(self, client: GradientAI) -> None: - response = client.droplets.backups.with_raw_response.list_policies() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - backup = response.parse() - assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list_policies(self, client: GradientAI) -> None: - with client.droplets.backups.with_streaming_response.list_policies() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - backup = response.parse() - assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_list_supported_policies(self, client: GradientAI) -> None: - backup = client.droplets.backups.list_supported_policies() - assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list_supported_policies(self, client: GradientAI) -> None: - response = client.droplets.backups.with_raw_response.list_supported_policies() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - backup = response.parse() - assert_matches_type(BackupListSupportedPoliciesResponse, 
backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list_supported_policies(self, client: GradientAI) -> None: - with client.droplets.backups.with_streaming_response.list_supported_policies() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - backup = response.parse() - assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve_policy(self, client: GradientAI) -> None: - backup = client.droplets.backups.retrieve_policy( - 1, - ) - assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve_policy(self, client: GradientAI) -> None: - response = client.droplets.backups.with_raw_response.retrieve_policy( - 1, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - backup = response.parse() - assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve_policy(self, client: GradientAI) -> None: - with client.droplets.backups.with_streaming_response.retrieve_policy( - 1, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - backup = response.parse() - assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncBackups: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - backup = await 
async_client.droplets.backups.list( - droplet_id=3164444, - ) - assert_matches_type(BackupListResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - backup = await async_client.droplets.backups.list( - droplet_id=3164444, - page=1, - per_page=1, - ) - assert_matches_type(BackupListResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.backups.with_raw_response.list( - droplet_id=3164444, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - backup = await response.parse() - assert_matches_type(BackupListResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.backups.with_streaming_response.list( - droplet_id=3164444, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - backup = await response.parse() - assert_matches_type(BackupListResponse, backup, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_list_policies(self, async_client: AsyncGradientAI) -> None: - backup = await async_client.droplets.backups.list_policies() - assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_policies_with_all_params(self, async_client: AsyncGradientAI) -> None: - backup = await async_client.droplets.backups.list_policies( - page=1, - per_page=1, - ) - assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def 
test_raw_response_list_policies(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.backups.with_raw_response.list_policies() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - backup = await response.parse() - assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list_policies(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.backups.with_streaming_response.list_policies() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - backup = await response.parse() - assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_list_supported_policies(self, async_client: AsyncGradientAI) -> None: - backup = await async_client.droplets.backups.list_supported_policies() - assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list_supported_policies(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.backups.with_raw_response.list_supported_policies() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - backup = await response.parse() - assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list_supported_policies(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.backups.with_streaming_response.list_supported_policies() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" 
- - backup = await response.parse() - assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve_policy(self, async_client: AsyncGradientAI) -> None: - backup = await async_client.droplets.backups.retrieve_policy( - 1, - ) - assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve_policy(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.backups.with_raw_response.retrieve_policy( - 1, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - backup = await response.parse() - assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve_policy(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.backups.with_streaming_response.retrieve_policy( - 1, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - backup = await response.parse() - assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/droplets/test_destroy_with_associated_resources.py b/tests/api_resources/droplets/test_destroy_with_associated_resources.py deleted file mode 100644 index 491de054..00000000 --- a/tests/api_resources/droplets/test_destroy_with_associated_resources.py +++ /dev/null @@ -1,429 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types.droplets import ( - DestroyWithAssociatedResourceListResponse, - DestroyWithAssociatedResourceCheckStatusResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestDestroyWithAssociatedResources: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - destroy_with_associated_resource = client.droplets.destroy_with_associated_resources.list( - 1, - ) - assert_matches_type( - DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] - ) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.droplets.destroy_with_associated_resources.with_raw_response.list( - 1, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - destroy_with_associated_resource = response.parse() - assert_matches_type( - DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] - ) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.droplets.destroy_with_associated_resources.with_streaming_response.list( - 1, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - destroy_with_associated_resource = response.parse() - assert_matches_type( - DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] - ) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def 
test_method_check_status(self, client: GradientAI) -> None: - destroy_with_associated_resource = client.droplets.destroy_with_associated_resources.check_status( - 1, - ) - assert_matches_type( - DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] - ) - - @pytest.mark.skip() - @parametrize - def test_raw_response_check_status(self, client: GradientAI) -> None: - response = client.droplets.destroy_with_associated_resources.with_raw_response.check_status( - 1, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - destroy_with_associated_resource = response.parse() - assert_matches_type( - DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] - ) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_check_status(self, client: GradientAI) -> None: - with client.droplets.destroy_with_associated_resources.with_streaming_response.check_status( - 1, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - destroy_with_associated_resource = response.parse() - assert_matches_type( - DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] - ) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete_dangerous(self, client: GradientAI) -> None: - destroy_with_associated_resource = client.droplets.destroy_with_associated_resources.delete_dangerous( - droplet_id=3164444, - x_dangerous=True, - ) - assert destroy_with_associated_resource is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete_dangerous(self, client: GradientAI) -> None: - response = client.droplets.destroy_with_associated_resources.with_raw_response.delete_dangerous( - droplet_id=3164444, - x_dangerous=True, - ) - - assert response.is_closed is True 
- assert response.http_request.headers.get("X-Stainless-Lang") == "python" - destroy_with_associated_resource = response.parse() - assert destroy_with_associated_resource is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete_dangerous(self, client: GradientAI) -> None: - with client.droplets.destroy_with_associated_resources.with_streaming_response.delete_dangerous( - droplet_id=3164444, - x_dangerous=True, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - destroy_with_associated_resource = response.parse() - assert destroy_with_associated_resource is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete_selective(self, client: GradientAI) -> None: - destroy_with_associated_resource = client.droplets.destroy_with_associated_resources.delete_selective( - droplet_id=3164444, - ) - assert destroy_with_associated_resource is None - - @pytest.mark.skip() - @parametrize - def test_method_delete_selective_with_all_params(self, client: GradientAI) -> None: - destroy_with_associated_resource = client.droplets.destroy_with_associated_resources.delete_selective( - droplet_id=3164444, - floating_ips=["6186916"], - reserved_ips=["6186916"], - snapshots=["61486916"], - volume_snapshots=["edb0478d-7436-11ea-86e6-0a58ac144b91"], - volumes=["ba49449a-7435-11ea-b89e-0a58ac14480f"], - ) - assert destroy_with_associated_resource is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete_selective(self, client: GradientAI) -> None: - response = client.droplets.destroy_with_associated_resources.with_raw_response.delete_selective( - droplet_id=3164444, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - destroy_with_associated_resource = response.parse() - assert destroy_with_associated_resource is None - - @pytest.mark.skip() - 
@parametrize - def test_streaming_response_delete_selective(self, client: GradientAI) -> None: - with client.droplets.destroy_with_associated_resources.with_streaming_response.delete_selective( - droplet_id=3164444, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - destroy_with_associated_resource = response.parse() - assert destroy_with_associated_resource is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retry(self, client: GradientAI) -> None: - destroy_with_associated_resource = client.droplets.destroy_with_associated_resources.retry( - 1, - ) - assert destroy_with_associated_resource is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_retry(self, client: GradientAI) -> None: - response = client.droplets.destroy_with_associated_resources.with_raw_response.retry( - 1, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - destroy_with_associated_resource = response.parse() - assert destroy_with_associated_resource is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retry(self, client: GradientAI) -> None: - with client.droplets.destroy_with_associated_resources.with_streaming_response.retry( - 1, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - destroy_with_associated_resource = response.parse() - assert destroy_with_associated_resource is None - - assert cast(Any, response.is_closed) is True - - -class TestAsyncDestroyWithAssociatedResources: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - 
destroy_with_associated_resource = await async_client.droplets.destroy_with_associated_resources.list( - 1, - ) - assert_matches_type( - DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] - ) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.destroy_with_associated_resources.with_raw_response.list( - 1, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - destroy_with_associated_resource = await response.parse() - assert_matches_type( - DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] - ) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.destroy_with_associated_resources.with_streaming_response.list( - 1, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - destroy_with_associated_resource = await response.parse() - assert_matches_type( - DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] - ) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_check_status(self, async_client: AsyncGradientAI) -> None: - destroy_with_associated_resource = await async_client.droplets.destroy_with_associated_resources.check_status( - 1, - ) - assert_matches_type( - DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] - ) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_check_status(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.destroy_with_associated_resources.with_raw_response.check_status( - 1, - ) - - assert response.is_closed 
is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - destroy_with_associated_resource = await response.parse() - assert_matches_type( - DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] - ) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_check_status(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.destroy_with_associated_resources.with_streaming_response.check_status( - 1, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - destroy_with_associated_resource = await response.parse() - assert_matches_type( - DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] - ) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete_dangerous(self, async_client: AsyncGradientAI) -> None: - destroy_with_associated_resource = ( - await async_client.droplets.destroy_with_associated_resources.delete_dangerous( - droplet_id=3164444, - x_dangerous=True, - ) - ) - assert destroy_with_associated_resource is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete_dangerous(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.destroy_with_associated_resources.with_raw_response.delete_dangerous( - droplet_id=3164444, - x_dangerous=True, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - destroy_with_associated_resource = await response.parse() - assert destroy_with_associated_resource is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete_dangerous(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.destroy_with_associated_resources.with_streaming_response.delete_dangerous( - 
droplet_id=3164444, - x_dangerous=True, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - destroy_with_associated_resource = await response.parse() - assert destroy_with_associated_resource is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete_selective(self, async_client: AsyncGradientAI) -> None: - destroy_with_associated_resource = ( - await async_client.droplets.destroy_with_associated_resources.delete_selective( - droplet_id=3164444, - ) - ) - assert destroy_with_associated_resource is None - - @pytest.mark.skip() - @parametrize - async def test_method_delete_selective_with_all_params(self, async_client: AsyncGradientAI) -> None: - destroy_with_associated_resource = ( - await async_client.droplets.destroy_with_associated_resources.delete_selective( - droplet_id=3164444, - floating_ips=["6186916"], - reserved_ips=["6186916"], - snapshots=["61486916"], - volume_snapshots=["edb0478d-7436-11ea-86e6-0a58ac144b91"], - volumes=["ba49449a-7435-11ea-b89e-0a58ac14480f"], - ) - ) - assert destroy_with_associated_resource is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete_selective(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.destroy_with_associated_resources.with_raw_response.delete_selective( - droplet_id=3164444, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - destroy_with_associated_resource = await response.parse() - assert destroy_with_associated_resource is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete_selective(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.destroy_with_associated_resources.with_streaming_response.delete_selective( - droplet_id=3164444, - ) as response: - assert not 
response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - destroy_with_associated_resource = await response.parse() - assert destroy_with_associated_resource is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retry(self, async_client: AsyncGradientAI) -> None: - destroy_with_associated_resource = await async_client.droplets.destroy_with_associated_resources.retry( - 1, - ) - assert destroy_with_associated_resource is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retry(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.destroy_with_associated_resources.with_raw_response.retry( - 1, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - destroy_with_associated_resource = await response.parse() - assert destroy_with_associated_resource is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retry(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.destroy_with_associated_resources.with_streaming_response.retry( - 1, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - destroy_with_associated_resource = await response.parse() - assert destroy_with_associated_resource is None - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/firewalls/__init__.py b/tests/api_resources/firewalls/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/firewalls/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
diff --git a/tests/api_resources/firewalls/test_droplets.py b/tests/api_resources/firewalls/test_droplets.py deleted file mode 100644 index 3df04735..00000000 --- a/tests/api_resources/firewalls/test_droplets.py +++ /dev/null @@ -1,206 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestDroplets: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_add(self, client: GradientAI) -> None: - droplet = client.firewalls.droplets.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - droplet_ids=[49696269], - ) - assert droplet is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_add(self, client: GradientAI) -> None: - response = client.firewalls.droplets.with_raw_response.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - droplet_ids=[49696269], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = response.parse() - assert droplet is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_add(self, client: GradientAI) -> None: - with client.firewalls.droplets.with_streaming_response.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - droplet_ids=[49696269], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = response.parse() - assert droplet is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_add(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value 
for `firewall_id` but received ''"): - client.firewalls.droplets.with_raw_response.add( - firewall_id="", - droplet_ids=[49696269], - ) - - @pytest.mark.skip() - @parametrize - def test_method_remove(self, client: GradientAI) -> None: - droplet = client.firewalls.droplets.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - droplet_ids=[49696269], - ) - assert droplet is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_remove(self, client: GradientAI) -> None: - response = client.firewalls.droplets.with_raw_response.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - droplet_ids=[49696269], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = response.parse() - assert droplet is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_remove(self, client: GradientAI) -> None: - with client.firewalls.droplets.with_streaming_response.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - droplet_ids=[49696269], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = response.parse() - assert droplet is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_remove(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): - client.firewalls.droplets.with_raw_response.remove( - firewall_id="", - droplet_ids=[49696269], - ) - - -class TestAsyncDroplets: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_add(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.firewalls.droplets.add( - 
firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - droplet_ids=[49696269], - ) - assert droplet is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: - response = await async_client.firewalls.droplets.with_raw_response.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - droplet_ids=[49696269], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = await response.parse() - assert droplet is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: - async with async_client.firewalls.droplets.with_streaming_response.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - droplet_ids=[49696269], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = await response.parse() - assert droplet is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): - await async_client.firewalls.droplets.with_raw_response.add( - firewall_id="", - droplet_ids=[49696269], - ) - - @pytest.mark.skip() - @parametrize - async def test_method_remove(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.firewalls.droplets.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - droplet_ids=[49696269], - ) - assert droplet is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None: - response = await async_client.firewalls.droplets.with_raw_response.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - droplet_ids=[49696269], - ) - - assert 
response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = await response.parse() - assert droplet is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None: - async with async_client.firewalls.droplets.with_streaming_response.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - droplet_ids=[49696269], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = await response.parse() - assert droplet is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): - await async_client.firewalls.droplets.with_raw_response.remove( - firewall_id="", - droplet_ids=[49696269], - ) diff --git a/tests/api_resources/firewalls/test_rules.py b/tests/api_resources/firewalls/test_rules.py deleted file mode 100644 index 2f8a7de0..00000000 --- a/tests/api_resources/firewalls/test_rules.py +++ /dev/null @@ -1,326 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestRules: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_add(self, client: GradientAI) -> None: - rule = client.firewalls.rules.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - ) - assert rule is None - - @pytest.mark.skip() - @parametrize - def test_method_add_with_all_params(self, client: GradientAI) -> None: - rule = client.firewalls.rules.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - inbound_rules=[ - { - "ports": "3306", - "protocol": "tcp", - "sources": { - "addresses": ["1.2.3.4", "18.0.0.0/8"], - "droplet_ids": [49696269], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["base-image", "prod"], - }, - } - ], - outbound_rules=[ - { - "destinations": { - "addresses": ["1.2.3.4", "18.0.0.0/8"], - "droplet_ids": [49696269], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["base-image", "prod"], - }, - "ports": "3306", - "protocol": "tcp", - } - ], - ) - assert rule is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_add(self, client: GradientAI) -> None: - response = client.firewalls.rules.with_raw_response.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - rule = response.parse() - assert rule is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_add(self, client: GradientAI) -> None: - with client.firewalls.rules.with_streaming_response.add( - 
firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - rule = response.parse() - assert rule is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_add(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): - client.firewalls.rules.with_raw_response.add( - firewall_id="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_remove(self, client: GradientAI) -> None: - rule = client.firewalls.rules.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - ) - assert rule is None - - @pytest.mark.skip() - @parametrize - def test_method_remove_with_all_params(self, client: GradientAI) -> None: - rule = client.firewalls.rules.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - inbound_rules=[ - { - "ports": "3306", - "protocol": "tcp", - "sources": { - "addresses": ["1.2.3.4", "18.0.0.0/8"], - "droplet_ids": [49696269], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["base-image", "prod"], - }, - } - ], - outbound_rules=[ - { - "destinations": { - "addresses": ["1.2.3.4", "18.0.0.0/8"], - "droplet_ids": [49696269], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["base-image", "prod"], - }, - "ports": "3306", - "protocol": "tcp", - } - ], - ) - assert rule is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_remove(self, client: GradientAI) -> None: - response = client.firewalls.rules.with_raw_response.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - 
rule = response.parse() - assert rule is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_remove(self, client: GradientAI) -> None: - with client.firewalls.rules.with_streaming_response.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - rule = response.parse() - assert rule is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_remove(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): - client.firewalls.rules.with_raw_response.remove( - firewall_id="", - ) - - -class TestAsyncRules: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_add(self, async_client: AsyncGradientAI) -> None: - rule = await async_client.firewalls.rules.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - ) - assert rule is None - - @pytest.mark.skip() - @parametrize - async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) -> None: - rule = await async_client.firewalls.rules.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - inbound_rules=[ - { - "ports": "3306", - "protocol": "tcp", - "sources": { - "addresses": ["1.2.3.4", "18.0.0.0/8"], - "droplet_ids": [49696269], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["base-image", "prod"], - }, - } - ], - outbound_rules=[ - { - "destinations": { - "addresses": ["1.2.3.4", "18.0.0.0/8"], - "droplet_ids": [49696269], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": 
["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["base-image", "prod"], - }, - "ports": "3306", - "protocol": "tcp", - } - ], - ) - assert rule is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: - response = await async_client.firewalls.rules.with_raw_response.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - rule = await response.parse() - assert rule is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: - async with async_client.firewalls.rules.with_streaming_response.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - rule = await response.parse() - assert rule is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): - await async_client.firewalls.rules.with_raw_response.add( - firewall_id="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_remove(self, async_client: AsyncGradientAI) -> None: - rule = await async_client.firewalls.rules.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - ) - assert rule is None - - @pytest.mark.skip() - @parametrize - async def test_method_remove_with_all_params(self, async_client: AsyncGradientAI) -> None: - rule = await async_client.firewalls.rules.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - inbound_rules=[ - { - "ports": "3306", - "protocol": "tcp", - "sources": { - "addresses": ["1.2.3.4", "18.0.0.0/8"], - "droplet_ids": [49696269], - 
"kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["base-image", "prod"], - }, - } - ], - outbound_rules=[ - { - "destinations": { - "addresses": ["1.2.3.4", "18.0.0.0/8"], - "droplet_ids": [49696269], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["base-image", "prod"], - }, - "ports": "3306", - "protocol": "tcp", - } - ], - ) - assert rule is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None: - response = await async_client.firewalls.rules.with_raw_response.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - rule = await response.parse() - assert rule is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None: - async with async_client.firewalls.rules.with_streaming_response.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - rule = await response.parse() - assert rule is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): - await async_client.firewalls.rules.with_raw_response.remove( - firewall_id="", - ) diff --git a/tests/api_resources/firewalls/test_tags.py b/tests/api_resources/firewalls/test_tags.py deleted file mode 100644 index 68c8b107..00000000 --- a/tests/api_resources/firewalls/test_tags.py +++ /dev/null @@ -1,206 +0,0 @@ -# File generated 
from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestTags: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_add(self, client: GradientAI) -> None: - tag = client.firewalls.tags.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - tags=["frontend"], - ) - assert tag is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_add(self, client: GradientAI) -> None: - response = client.firewalls.tags.with_raw_response.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - tags=["frontend"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - tag = response.parse() - assert tag is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_add(self, client: GradientAI) -> None: - with client.firewalls.tags.with_streaming_response.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - tags=["frontend"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - tag = response.parse() - assert tag is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_add(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): - client.firewalls.tags.with_raw_response.add( - firewall_id="", - tags=["frontend"], - ) - - @pytest.mark.skip() - @parametrize - def test_method_remove(self, client: GradientAI) -> None: - tag = client.firewalls.tags.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", 
- tags=["frontend"], - ) - assert tag is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_remove(self, client: GradientAI) -> None: - response = client.firewalls.tags.with_raw_response.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - tags=["frontend"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - tag = response.parse() - assert tag is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_remove(self, client: GradientAI) -> None: - with client.firewalls.tags.with_streaming_response.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - tags=["frontend"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - tag = response.parse() - assert tag is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_remove(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): - client.firewalls.tags.with_raw_response.remove( - firewall_id="", - tags=["frontend"], - ) - - -class TestAsyncTags: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_add(self, async_client: AsyncGradientAI) -> None: - tag = await async_client.firewalls.tags.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - tags=["frontend"], - ) - assert tag is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: - response = await async_client.firewalls.tags.with_raw_response.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - tags=["frontend"], - ) - - assert response.is_closed is True - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - tag = await response.parse() - assert tag is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: - async with async_client.firewalls.tags.with_streaming_response.add( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - tags=["frontend"], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - tag = await response.parse() - assert tag is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): - await async_client.firewalls.tags.with_raw_response.add( - firewall_id="", - tags=["frontend"], - ) - - @pytest.mark.skip() - @parametrize - async def test_method_remove(self, async_client: AsyncGradientAI) -> None: - tag = await async_client.firewalls.tags.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - tags=["frontend"], - ) - assert tag is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None: - response = await async_client.firewalls.tags.with_raw_response.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - tags=["frontend"], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - tag = await response.parse() - assert tag is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None: - async with async_client.firewalls.tags.with_streaming_response.remove( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - tags=["frontend"], - ) as response: - assert not response.is_closed - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - - tag = await response.parse() - assert tag is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): - await async_client.firewalls.tags.with_raw_response.remove( - firewall_id="", - tags=["frontend"], - ) diff --git a/tests/api_resources/floating_ips/__init__.py b/tests/api_resources/floating_ips/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/floating_ips/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/floating_ips/test_actions.py b/tests/api_resources/floating_ips/test_actions.py deleted file mode 100644 index a89a739c..00000000 --- a/tests/api_resources/floating_ips/test_actions.py +++ /dev/null @@ -1,396 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types.floating_ips import ( - ActionListResponse, - ActionCreateResponse, - ActionRetrieveResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestActions: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_overload_1(self, client: GradientAI) -> None: - action = client.floating_ips.actions.create( - floating_ip="45.55.96.47", - type="assign", - ) - assert_matches_type(ActionCreateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_overload_1(self, client: GradientAI) -> None: - response = client.floating_ips.actions.with_raw_response.create( - floating_ip="45.55.96.47", - type="assign", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionCreateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: - with client.floating_ips.actions.with_streaming_response.create( - floating_ip="45.55.96.47", - type="assign", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionCreateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_create_overload_1(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): - 
client.floating_ips.actions.with_raw_response.create( - floating_ip="", - type="assign", - ) - - @pytest.mark.skip() - @parametrize - def test_method_create_overload_2(self, client: GradientAI) -> None: - action = client.floating_ips.actions.create( - floating_ip="45.55.96.47", - droplet_id=758604968, - type="assign", - ) - assert_matches_type(ActionCreateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_overload_2(self, client: GradientAI) -> None: - response = client.floating_ips.actions.with_raw_response.create( - floating_ip="45.55.96.47", - droplet_id=758604968, - type="assign", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionCreateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: - with client.floating_ips.actions.with_streaming_response.create( - floating_ip="45.55.96.47", - droplet_id=758604968, - type="assign", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionCreateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_create_overload_2(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): - client.floating_ips.actions.with_raw_response.create( - floating_ip="", - droplet_id=758604968, - type="assign", - ) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - action = client.floating_ips.actions.retrieve( - action_id=36804636, - floating_ip="45.55.96.47", - ) - assert_matches_type(ActionRetrieveResponse, action, 
path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.floating_ips.actions.with_raw_response.retrieve( - action_id=36804636, - floating_ip="45.55.96.47", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.floating_ips.actions.with_streaming_response.retrieve( - action_id=36804636, - floating_ip="45.55.96.47", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): - client.floating_ips.actions.with_raw_response.retrieve( - action_id=36804636, - floating_ip="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - action = client.floating_ips.actions.list( - "192.168.1.1", - ) - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.floating_ips.actions.with_raw_response.list( - "192.168.1.1", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, 
client: GradientAI) -> None: - with client.floating_ips.actions.with_streaming_response.list( - "192.168.1.1", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionListResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_list(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): - client.floating_ips.actions.with_raw_response.list( - "", - ) - - -class TestAsyncActions: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: - action = await async_client.floating_ips.actions.create( - floating_ip="45.55.96.47", - type="assign", - ) - assert_matches_type(ActionCreateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: - response = await async_client.floating_ips.actions.with_raw_response.create( - floating_ip="45.55.96.47", - type="assign", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionCreateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: - async with async_client.floating_ips.actions.with_streaming_response.create( - floating_ip="45.55.96.47", - type="assign", - ) as response: - assert not response.is_closed - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionCreateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_create_overload_1(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): - await async_client.floating_ips.actions.with_raw_response.create( - floating_ip="", - type="assign", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: - action = await async_client.floating_ips.actions.create( - floating_ip="45.55.96.47", - droplet_id=758604968, - type="assign", - ) - assert_matches_type(ActionCreateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: - response = await async_client.floating_ips.actions.with_raw_response.create( - floating_ip="45.55.96.47", - droplet_id=758604968, - type="assign", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionCreateResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: - async with async_client.floating_ips.actions.with_streaming_response.create( - floating_ip="45.55.96.47", - droplet_id=758604968, - type="assign", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionCreateResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - 
@parametrize - async def test_path_params_create_overload_2(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): - await async_client.floating_ips.actions.with_raw_response.create( - floating_ip="", - droplet_id=758604968, - type="assign", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - action = await async_client.floating_ips.actions.retrieve( - action_id=36804636, - floating_ip="45.55.96.47", - ) - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.floating_ips.actions.with_raw_response.retrieve( - action_id=36804636, - floating_ip="45.55.96.47", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.floating_ips.actions.with_streaming_response.retrieve( - action_id=36804636, - floating_ip="45.55.96.47", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): - await async_client.floating_ips.actions.with_raw_response.retrieve( - action_id=36804636, - 
floating_ip="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - action = await async_client.floating_ips.actions.list( - "192.168.1.1", - ) - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.floating_ips.actions.with_raw_response.list( - "192.168.1.1", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.floating_ips.actions.with_streaming_response.list( - "192.168.1.1", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionListResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): - await async_client.floating_ips.actions.with_raw_response.list( - "", - ) diff --git a/tests/api_resources/images/__init__.py b/tests/api_resources/images/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/images/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
diff --git a/tests/api_resources/images/test_actions.py b/tests/api_resources/images/test_actions.py deleted file mode 100644 index 8006bee1..00000000 --- a/tests/api_resources/images/test_actions.py +++ /dev/null @@ -1,321 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types.images import ActionListResponse -from gradientai.types.shared import Action - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestActions: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_overload_1(self, client: GradientAI) -> None: - action = client.images.actions.create( - image_id=62137902, - type="convert", - ) - assert_matches_type(Action, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_overload_1(self, client: GradientAI) -> None: - response = client.images.actions.with_raw_response.create( - image_id=62137902, - type="convert", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(Action, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: - with client.images.actions.with_streaming_response.create( - image_id=62137902, - type="convert", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(Action, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - 
@parametrize - def test_method_create_overload_2(self, client: GradientAI) -> None: - action = client.images.actions.create( - image_id=62137902, - region="nyc3", - type="convert", - ) - assert_matches_type(Action, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_overload_2(self, client: GradientAI) -> None: - response = client.images.actions.with_raw_response.create( - image_id=62137902, - region="nyc3", - type="convert", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(Action, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: - with client.images.actions.with_streaming_response.create( - image_id=62137902, - region="nyc3", - type="convert", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(Action, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - action = client.images.actions.retrieve( - action_id=36804636, - image_id=62137902, - ) - assert_matches_type(Action, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.images.actions.with_raw_response.retrieve( - action_id=36804636, - image_id=62137902, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(Action, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with 
client.images.actions.with_streaming_response.retrieve( - action_id=36804636, - image_id=62137902, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(Action, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - action = client.images.actions.list( - 0, - ) - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.images.actions.with_raw_response.list( - 0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.images.actions.with_streaming_response.list( - 0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionListResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncActions: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: - action = await async_client.images.actions.create( - image_id=62137902, - type="convert", - ) - assert_matches_type(Action, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: - 
response = await async_client.images.actions.with_raw_response.create( - image_id=62137902, - type="convert", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(Action, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: - async with async_client.images.actions.with_streaming_response.create( - image_id=62137902, - type="convert", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(Action, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: - action = await async_client.images.actions.create( - image_id=62137902, - region="nyc3", - type="convert", - ) - assert_matches_type(Action, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: - response = await async_client.images.actions.with_raw_response.create( - image_id=62137902, - region="nyc3", - type="convert", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(Action, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: - async with async_client.images.actions.with_streaming_response.create( - image_id=62137902, - region="nyc3", - type="convert", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - 
action = await response.parse() - assert_matches_type(Action, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - action = await async_client.images.actions.retrieve( - action_id=36804636, - image_id=62137902, - ) - assert_matches_type(Action, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.images.actions.with_raw_response.retrieve( - action_id=36804636, - image_id=62137902, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(Action, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.images.actions.with_streaming_response.retrieve( - action_id=36804636, - image_id=62137902, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(Action, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - action = await async_client.images.actions.list( - 0, - ) - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.images.actions.with_raw_response.list( - 0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - 
assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.images.actions.with_streaming_response.list( - 0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionListResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/inference/test_api_keys.py b/tests/api_resources/inference/test_api_keys.py index 157a2e3d..90bf95b9 100644 --- a/tests/api_resources/inference/test_api_keys.py +++ b/tests/api_resources/inference/test_api_keys.py @@ -33,7 +33,7 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: api_key = client.inference.api_keys.create( - name="Production Key", + name="name", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -63,7 +63,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: api_key = client.inference.api_keys.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -71,9 +71,9 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: api_key = client.inference.api_keys.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', - name='"Production Key"', + path_api_key_uuid="api_key_uuid", + body_api_key_uuid="api_key_uuid", + name="name", ) assert_matches_type(APIKeyUpdateResponse, api_key, 
path=["response"]) @@ -81,7 +81,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.inference.api_keys.with_raw_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) assert response.is_closed is True @@ -93,7 +93,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.inference.api_keys.with_streaming_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -248,7 +248,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.inference.api_keys.create( - name="Production Key", + name="name", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -278,7 +278,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.inference.api_keys.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -286,9 +286,9 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.inference.api_keys.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', - 
name='"Production Key"', + path_api_key_uuid="api_key_uuid", + body_api_key_uuid="api_key_uuid", + name="name", ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -296,7 +296,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.inference.api_keys.with_raw_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) assert response.is_closed is True @@ -308,7 +308,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.inference.api_keys.with_streaming_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index 55b056b8..9c466e2f 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -25,7 +25,7 @@ class TestDataSources: @parametrize def test_method_create(self, client: GradientAI) -> None: data_source = client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_knowledge_base_uuid="knowledge_base_uuid", ) assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) @@ -33,22 +33,22 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: data_source = client.knowledge_bases.data_sources.create( - 
path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_knowledge_base_uuid="knowledge_base_uuid", aws_data_source={ - "bucket_name": '"example name"', - "item_path": '"example string"', - "key_id": '"123e4567-e89b-12d3-a456-426614174000"', - "region": '"example string"', - "secret_key": '"example string"', + "bucket_name": "bucket_name", + "item_path": "item_path", + "key_id": "key_id", + "region": "region", + "secret_key": "secret_key", }, - body_knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"', + body_knowledge_base_uuid="knowledge_base_uuid", spaces_data_source={ - "bucket_name": '"example name"', - "item_path": '"example string"', - "region": '"example string"', + "bucket_name": "bucket_name", + "item_path": "item_path", + "region": "region", }, web_crawler_data_source={ - "base_url": '"example string"', + "base_url": "base_url", "crawling_option": "UNKNOWN", "embed_media": True, }, @@ -59,7 +59,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_create(self, client: GradientAI) -> None: response = client.knowledge_bases.data_sources.with_raw_response.create( - path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_knowledge_base_uuid="knowledge_base_uuid", ) assert response.is_closed is True @@ -71,7 +71,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: with client.knowledge_bases.data_sources.with_streaming_response.create( - path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_knowledge_base_uuid="knowledge_base_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -95,7 +95,7 @@ def test_path_params_create(self, client: GradientAI) -> None: @parametrize def test_method_list(self, client: GradientAI) -> None: data_source = 
client.knowledge_bases.data_sources.list( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", ) assert_matches_type(DataSourceListResponse, data_source, path=["response"]) @@ -103,7 +103,7 @@ def test_method_list(self, client: GradientAI) -> None: @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: data_source = client.knowledge_bases.data_sources.list( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", page=0, per_page=0, ) @@ -113,7 +113,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_list(self, client: GradientAI) -> None: response = client.knowledge_bases.data_sources.with_raw_response.list( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", ) assert response.is_closed is True @@ -125,7 +125,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: with client.knowledge_bases.data_sources.with_streaming_response.list( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -147,8 +147,8 @@ def test_path_params_list(self, client: GradientAI) -> None: @parametrize def test_method_delete(self, client: GradientAI) -> None: data_source = client.knowledge_bases.data_sources.delete( - data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", ) assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) @@ -156,8 +156,8 @@ def test_method_delete(self, client: GradientAI) -> None: 
@parametrize def test_raw_response_delete(self, client: GradientAI) -> None: response = client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", ) assert response.is_closed is True @@ -169,8 +169,8 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: with client.knowledge_bases.data_sources.with_streaming_response.delete( - data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -185,14 +185,14 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', + data_source_uuid="data_source_uuid", knowledge_base_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"): client.knowledge_bases.data_sources.with_raw_response.delete( data_source_uuid="", - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", ) @@ -205,7 +205,7 @@ class TestAsyncDataSources: @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: data_source = await async_client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + 
path_knowledge_base_uuid="knowledge_base_uuid", ) assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) @@ -213,22 +213,22 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: data_source = await async_client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_knowledge_base_uuid="knowledge_base_uuid", aws_data_source={ - "bucket_name": '"example name"', - "item_path": '"example string"', - "key_id": '"123e4567-e89b-12d3-a456-426614174000"', - "region": '"example string"', - "secret_key": '"example string"', + "bucket_name": "bucket_name", + "item_path": "item_path", + "key_id": "key_id", + "region": "region", + "secret_key": "secret_key", }, - body_knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"', + body_knowledge_base_uuid="knowledge_base_uuid", spaces_data_source={ - "bucket_name": '"example name"', - "item_path": '"example string"', - "region": '"example string"', + "bucket_name": "bucket_name", + "item_path": "item_path", + "region": "region", }, web_crawler_data_source={ - "base_url": '"example string"', + "base_url": "base_url", "crawling_option": "UNKNOWN", "embed_media": True, }, @@ -239,7 +239,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: response = await async_client.knowledge_bases.data_sources.with_raw_response.create( - path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_knowledge_base_uuid="knowledge_base_uuid", ) assert response.is_closed is True @@ -251,7 +251,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: async with 
async_client.knowledge_bases.data_sources.with_streaming_response.create( - path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_knowledge_base_uuid="knowledge_base_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -275,7 +275,7 @@ async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: data_source = await async_client.knowledge_bases.data_sources.list( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", ) assert_matches_type(DataSourceListResponse, data_source, path=["response"]) @@ -283,7 +283,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: data_source = await async_client.knowledge_bases.data_sources.list( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", page=0, per_page=0, ) @@ -293,7 +293,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: response = await async_client.knowledge_bases.data_sources.with_raw_response.list( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", ) assert response.is_closed is True @@ -305,7 +305,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: async with async_client.knowledge_bases.data_sources.with_streaming_response.list( - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", ) as response: assert not response.is_closed 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -327,8 +327,8 @@ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: data_source = await async_client.knowledge_bases.data_sources.delete( - data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", ) assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) @@ -336,8 +336,8 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: response = await async_client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", ) assert response.is_closed is True @@ -349,8 +349,8 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: async with async_client.knowledge_bases.data_sources.with_streaming_response.delete( - data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + data_source_uuid="data_source_uuid", + knowledge_base_uuid="knowledge_base_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -365,12 +365,12 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value 
for `knowledge_base_uuid` but received ''"): await async_client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', + data_source_uuid="data_source_uuid", knowledge_base_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"): await async_client.knowledge_bases.data_sources.with_raw_response.delete( data_source_uuid="", - knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid="knowledge_base_uuid", ) diff --git a/tests/api_resources/knowledge_bases/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py index ed32d7f8..8bf1829f 100644 --- a/tests/api_resources/knowledge_bases/test_indexing_jobs.py +++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py @@ -33,8 +33,8 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: indexing_job = client.knowledge_bases.indexing_jobs.create( - data_source_uuids=["example string"], - knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"', + data_source_uuids=["string"], + knowledge_base_uuid="knowledge_base_uuid", ) assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @@ -185,7 +185,7 @@ def test_path_params_retrieve_data_sources(self, client: GradientAI) -> None: @parametrize def test_method_update_cancel(self, client: GradientAI) -> None: indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -193,8 +193,8 @@ def test_method_update_cancel(self, client: GradientAI) -> None: @parametrize def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None: indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( - 
path_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_uuid='"12345678-1234-1234-1234-123456789012"', + path_uuid="uuid", + body_uuid="uuid", ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -202,7 +202,7 @@ def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update_cancel(self, client: GradientAI) -> None: response = client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert response.is_closed is True @@ -214,7 +214,7 @@ def test_raw_response_update_cancel(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update_cancel(self, client: GradientAI) -> None: with client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -248,8 +248,8 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.create( - data_source_uuids=["example string"], - knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"', + data_source_uuids=["string"], + knowledge_base_uuid="knowledge_base_uuid", ) assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @@ -400,7 +400,7 @@ async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradie @parametrize async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) 
assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -408,8 +408,8 @@ async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update_cancel_with_all_params(self, async_client: AsyncGradientAI) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_uuid='"12345678-1234-1234-1234-123456789012"', + path_uuid="uuid", + body_uuid="uuid", ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -417,7 +417,7 @@ async def test_method_update_cancel_with_all_params(self, async_client: AsyncGra @parametrize async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) -> None: response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert response.is_closed is True @@ -429,7 +429,7 @@ async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) - @parametrize async def test_streaming_response_update_cancel(self, async_client: AsyncGradientAI) -> None: async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/load_balancers/__init__.py b/tests/api_resources/load_balancers/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/load_balancers/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
diff --git a/tests/api_resources/load_balancers/test_droplets.py b/tests/api_resources/load_balancers/test_droplets.py deleted file mode 100644 index 08ccf009..00000000 --- a/tests/api_resources/load_balancers/test_droplets.py +++ /dev/null @@ -1,206 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestDroplets: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_add(self, client: GradientAI) -> None: - droplet = client.load_balancers.droplets.add( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - droplet_ids=[3164444, 3164445], - ) - assert droplet is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_add(self, client: GradientAI) -> None: - response = client.load_balancers.droplets.with_raw_response.add( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - droplet_ids=[3164444, 3164445], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = response.parse() - assert droplet is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_add(self, client: GradientAI) -> None: - with client.load_balancers.droplets.with_streaming_response.add( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - droplet_ids=[3164444, 3164445], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = response.parse() - assert droplet is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_add(self, client: GradientAI) -> None: - with pytest.raises(ValueError, 
match=r"Expected a non-empty value for `lb_id` but received ''"): - client.load_balancers.droplets.with_raw_response.add( - lb_id="", - droplet_ids=[3164444, 3164445], - ) - - @pytest.mark.skip() - @parametrize - def test_method_remove(self, client: GradientAI) -> None: - droplet = client.load_balancers.droplets.remove( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - droplet_ids=[3164444, 3164445], - ) - assert droplet is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_remove(self, client: GradientAI) -> None: - response = client.load_balancers.droplets.with_raw_response.remove( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - droplet_ids=[3164444, 3164445], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = response.parse() - assert droplet is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_remove(self, client: GradientAI) -> None: - with client.load_balancers.droplets.with_streaming_response.remove( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - droplet_ids=[3164444, 3164445], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = response.parse() - assert droplet is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_remove(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): - client.load_balancers.droplets.with_raw_response.remove( - lb_id="", - droplet_ids=[3164444, 3164445], - ) - - -class TestAsyncDroplets: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_add(self, async_client: AsyncGradientAI) -> None: - droplet = await 
async_client.load_balancers.droplets.add( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - droplet_ids=[3164444, 3164445], - ) - assert droplet is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: - response = await async_client.load_balancers.droplets.with_raw_response.add( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - droplet_ids=[3164444, 3164445], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = await response.parse() - assert droplet is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: - async with async_client.load_balancers.droplets.with_streaming_response.add( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - droplet_ids=[3164444, 3164445], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = await response.parse() - assert droplet is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): - await async_client.load_balancers.droplets.with_raw_response.add( - lb_id="", - droplet_ids=[3164444, 3164445], - ) - - @pytest.mark.skip() - @parametrize - async def test_method_remove(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.load_balancers.droplets.remove( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - droplet_ids=[3164444, 3164445], - ) - assert droplet is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None: - response = await async_client.load_balancers.droplets.with_raw_response.remove( - 
lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - droplet_ids=[3164444, 3164445], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = await response.parse() - assert droplet is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None: - async with async_client.load_balancers.droplets.with_streaming_response.remove( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - droplet_ids=[3164444, 3164445], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = await response.parse() - assert droplet is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): - await async_client.load_balancers.droplets.with_raw_response.remove( - lb_id="", - droplet_ids=[3164444, 3164445], - ) diff --git a/tests/api_resources/load_balancers/test_forwarding_rules.py b/tests/api_resources/load_balancers/test_forwarding_rules.py deleted file mode 100644 index 3acf8287..00000000 --- a/tests/api_resources/load_balancers/test_forwarding_rules.py +++ /dev/null @@ -1,318 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestForwardingRules: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_add(self, client: GradientAI) -> None: - forwarding_rule = client.load_balancers.forwarding_rules.add( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - assert forwarding_rule is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_add(self, client: GradientAI) -> None: - response = client.load_balancers.forwarding_rules.with_raw_response.add( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - forwarding_rule = response.parse() - assert forwarding_rule is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_add(self, client: GradientAI) -> None: - with client.load_balancers.forwarding_rules.with_streaming_response.add( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - forwarding_rule = response.parse() - assert forwarding_rule is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_add(self, client: GradientAI) -> None: 
- with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): - client.load_balancers.forwarding_rules.with_raw_response.add( - lb_id="", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - @pytest.mark.skip() - @parametrize - def test_method_remove(self, client: GradientAI) -> None: - forwarding_rule = client.load_balancers.forwarding_rules.remove( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - assert forwarding_rule is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_remove(self, client: GradientAI) -> None: - response = client.load_balancers.forwarding_rules.with_raw_response.remove( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - forwarding_rule = response.parse() - assert forwarding_rule is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_remove(self, client: GradientAI) -> None: - with client.load_balancers.forwarding_rules.with_streaming_response.remove( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - forwarding_rule = response.parse() - assert forwarding_rule is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_remove(self, client: GradientAI) -> None: - with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): - client.load_balancers.forwarding_rules.with_raw_response.remove( - lb_id="", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - -class TestAsyncForwardingRules: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_add(self, async_client: AsyncGradientAI) -> None: - forwarding_rule = await async_client.load_balancers.forwarding_rules.add( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - assert forwarding_rule is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: - response = await async_client.load_balancers.forwarding_rules.with_raw_response.add( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - forwarding_rule = await response.parse() - assert forwarding_rule is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: - async with async_client.load_balancers.forwarding_rules.with_streaming_response.add( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" - - forwarding_rule = await response.parse() - assert forwarding_rule is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): - await async_client.load_balancers.forwarding_rules.with_raw_response.add( - lb_id="", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - @pytest.mark.skip() - @parametrize - async def test_method_remove(self, async_client: AsyncGradientAI) -> None: - forwarding_rule = await async_client.load_balancers.forwarding_rules.remove( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - assert forwarding_rule is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None: - response = await async_client.load_balancers.forwarding_rules.with_raw_response.remove( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - forwarding_rule = await response.parse() - assert forwarding_rule is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None: - async with async_client.load_balancers.forwarding_rules.with_streaming_response.remove( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) as response: - 
assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - forwarding_rule = await response.parse() - assert forwarding_rule is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): - await async_client.load_balancers.forwarding_rules.with_raw_response.remove( - lb_id="", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) diff --git a/tests/api_resources/models/providers/test_anthropic.py b/tests/api_resources/models/providers/test_anthropic.py index c61a97ea..79bfcdc3 100644 --- a/tests/api_resources/models/providers/test_anthropic.py +++ b/tests/api_resources/models/providers/test_anthropic.py @@ -34,8 +34,8 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: anthropic = client.models.providers.anthropic.create( - api_key='"sk-ant-12345678901234567890123456789012"', - name='"Production Key"', + api_key="api_key", + name="name", ) assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) @@ -107,7 +107,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: anthropic = client.models.providers.anthropic.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) @@ -115,10 +115,10 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: anthropic = client.models.providers.anthropic.update( - 
path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - api_key='"sk-ant-12345678901234567890123456789012"', - body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', - name='"Production Key"', + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", ) assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) @@ -126,7 +126,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.models.providers.anthropic.with_raw_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) assert response.is_closed is True @@ -138,7 +138,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.models.providers.anthropic.with_streaming_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -239,7 +239,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @parametrize def test_method_list_agents(self, client: GradientAI) -> None: anthropic = client.models.providers.anthropic.list_agents( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"]) @@ -247,7 +247,7 @@ def test_method_list_agents(self, client: GradientAI) -> None: @parametrize def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: anthropic = client.models.providers.anthropic.list_agents( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", page=0, per_page=0, ) @@ -257,7 +257,7 @@ def test_method_list_agents_with_all_params(self, client: GradientAI) -> 
None: @parametrize def test_raw_response_list_agents(self, client: GradientAI) -> None: response = client.models.providers.anthropic.with_raw_response.list_agents( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) assert response.is_closed is True @@ -269,7 +269,7 @@ def test_raw_response_list_agents(self, client: GradientAI) -> None: @parametrize def test_streaming_response_list_agents(self, client: GradientAI) -> None: with client.models.providers.anthropic.with_streaming_response.list_agents( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -303,8 +303,8 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: anthropic = await async_client.models.providers.anthropic.create( - api_key='"sk-ant-12345678901234567890123456789012"', - name='"Production Key"', + api_key="api_key", + name="name", ) assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) @@ -376,7 +376,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: anthropic = await async_client.models.providers.anthropic.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) @@ -384,10 +384,10 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: anthropic = await async_client.models.providers.anthropic.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - api_key='"sk-ant-12345678901234567890123456789012"', - 
body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', - name='"Production Key"', + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", ) assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) @@ -395,7 +395,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.models.providers.anthropic.with_raw_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) assert response.is_closed is True @@ -407,7 +407,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.models.providers.anthropic.with_streaming_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -508,7 +508,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: anthropic = await async_client.models.providers.anthropic.list_agents( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"]) @@ -516,7 +516,7 @@ async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: anthropic = await async_client.models.providers.anthropic.list_agents( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", page=0, per_page=0, ) @@ -526,7 +526,7 @@ async def 
test_method_list_agents_with_all_params(self, async_client: AsyncGradi @parametrize async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None: response = await async_client.models.providers.anthropic.with_raw_response.list_agents( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) assert response.is_closed is True @@ -538,7 +538,7 @@ async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> @parametrize async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None: async with async_client.models.providers.anthropic.with_streaming_response.list_agents( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/models/providers/test_openai.py b/tests/api_resources/models/providers/test_openai.py index 7fde1a69..2640601e 100644 --- a/tests/api_resources/models/providers/test_openai.py +++ b/tests/api_resources/models/providers/test_openai.py @@ -34,8 +34,8 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: openai = client.models.providers.openai.create( - api_key='"sk-proj--123456789098765432123456789"', - name='"Production Key"', + api_key="api_key", + name="name", ) assert_matches_type(OpenAICreateResponse, openai, path=["response"]) @@ -107,7 +107,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: openai = client.models.providers.openai.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @@ -115,10 +115,10 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def 
test_method_update_with_all_params(self, client: GradientAI) -> None: openai = client.models.providers.openai.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - api_key='"sk-ant-12345678901234567890123456789012"', - body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', - name='"Production Key"', + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", ) assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @@ -126,7 +126,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.models.providers.openai.with_raw_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) assert response.is_closed is True @@ -138,7 +138,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.models.providers.openai.with_streaming_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -239,7 +239,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @parametrize def test_method_retrieve_agents(self, client: GradientAI) -> None: openai = client.models.providers.openai.retrieve_agents( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) @@ -247,7 +247,7 @@ def test_method_retrieve_agents(self, client: GradientAI) -> None: @parametrize def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> None: openai = client.models.providers.openai.retrieve_agents( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", 
page=0, per_page=0, ) @@ -257,7 +257,7 @@ def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> Non @parametrize def test_raw_response_retrieve_agents(self, client: GradientAI) -> None: response = client.models.providers.openai.with_raw_response.retrieve_agents( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) assert response.is_closed is True @@ -269,7 +269,7 @@ def test_raw_response_retrieve_agents(self, client: GradientAI) -> None: @parametrize def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None: with client.models.providers.openai.with_streaming_response.retrieve_agents( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -303,8 +303,8 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: openai = await async_client.models.providers.openai.create( - api_key='"sk-proj--123456789098765432123456789"', - name='"Production Key"', + api_key="api_key", + name="name", ) assert_matches_type(OpenAICreateResponse, openai, path=["response"]) @@ -376,7 +376,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: openai = await async_client.models.providers.openai.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @@ -384,10 +384,10 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: openai = await async_client.models.providers.openai.update( - 
path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', - api_key='"sk-ant-12345678901234567890123456789012"', - body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', - name='"Production Key"', + path_api_key_uuid="api_key_uuid", + api_key="api_key", + body_api_key_uuid="api_key_uuid", + name="name", ) assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @@ -395,7 +395,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.models.providers.openai.with_raw_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) assert response.is_closed is True @@ -407,7 +407,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.models.providers.openai.with_streaming_response.update( - path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_api_key_uuid="api_key_uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -508,7 +508,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> None: openai = await async_client.models.providers.openai.retrieve_agents( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) @@ -516,7 +516,7 @@ async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> No @parametrize async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: openai = await async_client.models.providers.openai.retrieve_agents( - 
uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", page=0, per_page=0, ) @@ -526,7 +526,7 @@ async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncG @parametrize async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: response = await async_client.models.providers.openai.with_raw_response.retrieve_agents( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) assert response.is_closed is True @@ -538,7 +538,7 @@ async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) @parametrize async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: async with async_client.models.providers.openai.with_streaming_response.retrieve_agents( - uuid='"123e4567-e89b-12d3-a456-426614174000"', + uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/test_account.py b/tests/api_resources/test_account.py deleted file mode 100644 index d6ee9b10..00000000 --- a/tests/api_resources/test_account.py +++ /dev/null @@ -1,80 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types import AccountRetrieveResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestAccount: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - account = client.account.retrieve() - assert_matches_type(AccountRetrieveResponse, account, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.account.with_raw_response.retrieve() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - account = response.parse() - assert_matches_type(AccountRetrieveResponse, account, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.account.with_streaming_response.retrieve() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - account = response.parse() - assert_matches_type(AccountRetrieveResponse, account, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncAccount: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - account = await async_client.account.retrieve() - assert_matches_type(AccountRetrieveResponse, account, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def 
test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.account.with_raw_response.retrieve() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - account = await response.parse() - assert_matches_type(AccountRetrieveResponse, account, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.account.with_streaming_response.retrieve() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - account = await response.parse() - assert_matches_type(AccountRetrieveResponse, account, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index 8a6a7d69..2cc0e080 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -34,16 +34,16 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: agent = client.agents.create( - anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', - description='"My Agent Description"', - instruction='"You are an agent who thinks deeply about the world"', - knowledge_base_uuid=["example string"], - model_uuid='"12345678-1234-1234-1234-123456789012"', - name='"My Agent"', - openai_key_uuid='"12345678-1234-1234-1234-123456789012"', - project_id='"12345678-1234-1234-1234-123456789012"', - region='"tor1"', - tags=["example string"], + anthropic_key_uuid="anthropic_key_uuid", + description="description", + instruction="instruction", + knowledge_base_uuid=["string"], + model_uuid="model_uuid", + name="name", + openai_key_uuid="open_ai_key_uuid", + project_id="project_id", + region="region", + tags=["string"], ) 
assert_matches_type(AgentCreateResponse, agent, path=["response"]) @@ -115,7 +115,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: agent = client.agents.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @@ -123,23 +123,22 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: agent = client.agents.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', - anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', - conversation_logs_enabled=True, - description='"My Agent Description"', - instruction='"You are an agent who thinks deeply about the world"', - k=5, - max_tokens=100, - model_uuid='"12345678-1234-1234-1234-123456789012"', - name='"My New Agent Name"', - openai_key_uuid='"12345678-1234-1234-1234-123456789012"', - project_id='"12345678-1234-1234-1234-123456789012"', + path_uuid="uuid", + anthropic_key_uuid="anthropic_key_uuid", + description="description", + instruction="instruction", + k=0, + max_tokens=0, + model_uuid="model_uuid", + name="name", + openai_key_uuid="open_ai_key_uuid", + project_id="project_id", provide_citations=True, retrieval_method="RETRIEVAL_METHOD_UNKNOWN", - tags=["example string"], - temperature=0.7, - top_p=0.9, - body_uuid='"12345678-1234-1234-1234-123456789012"', + tags=["string"], + temperature=0, + top_p=0, + body_uuid="uuid", ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @@ -147,7 +146,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.with_raw_response.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert response.is_closed is True @@ -159,7 +158,7 @@ 
def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.with_streaming_response.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -261,7 +260,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @parametrize def test_method_update_status(self, client: GradientAI) -> None: agent = client.agents.update_status( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @@ -269,8 +268,8 @@ def test_method_update_status(self, client: GradientAI) -> None: @parametrize def test_method_update_status_with_all_params(self, client: GradientAI) -> None: agent = client.agents.update_status( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_uuid='"12345678-1234-1234-1234-123456789012"', + path_uuid="uuid", + body_uuid="uuid", visibility="VISIBILITY_UNKNOWN", ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @@ -279,7 +278,7 @@ def test_method_update_status_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update_status(self, client: GradientAI) -> None: response = client.agents.with_raw_response.update_status( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert response.is_closed is True @@ -291,7 +290,7 @@ def test_raw_response_update_status(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update_status(self, client: GradientAI) -> None: with client.agents.with_streaming_response.update_status( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ 
-325,16 +324,16 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.create( - anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', - description='"My Agent Description"', - instruction='"You are an agent who thinks deeply about the world"', - knowledge_base_uuid=["example string"], - model_uuid='"12345678-1234-1234-1234-123456789012"', - name='"My Agent"', - openai_key_uuid='"12345678-1234-1234-1234-123456789012"', - project_id='"12345678-1234-1234-1234-123456789012"', - region='"tor1"', - tags=["example string"], + anthropic_key_uuid="anthropic_key_uuid", + description="description", + instruction="instruction", + knowledge_base_uuid=["string"], + model_uuid="model_uuid", + name="name", + openai_key_uuid="open_ai_key_uuid", + project_id="project_id", + region="region", + tags=["string"], ) assert_matches_type(AgentCreateResponse, agent, path=["response"]) @@ -406,7 +405,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @@ -414,23 +413,22 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', - anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', - conversation_logs_enabled=True, - description='"My Agent Description"', - instruction='"You are an agent who thinks deeply about the world"', - k=5, - max_tokens=100, - model_uuid='"12345678-1234-1234-1234-123456789012"', 
- name='"My New Agent Name"', - openai_key_uuid='"12345678-1234-1234-1234-123456789012"', - project_id='"12345678-1234-1234-1234-123456789012"', + path_uuid="uuid", + anthropic_key_uuid="anthropic_key_uuid", + description="description", + instruction="instruction", + k=0, + max_tokens=0, + model_uuid="model_uuid", + name="name", + openai_key_uuid="open_ai_key_uuid", + project_id="project_id", provide_citations=True, retrieval_method="RETRIEVAL_METHOD_UNKNOWN", - tags=["example string"], - temperature=0.7, - top_p=0.9, - body_uuid='"12345678-1234-1234-1234-123456789012"', + tags=["string"], + temperature=0, + top_p=0, + body_uuid="uuid", ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @@ -438,7 +436,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.with_raw_response.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert response.is_closed is True @@ -450,7 +448,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.with_streaming_response.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -552,7 +550,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_status(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.update_status( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @@ -560,8 +558,8 @@ async def test_method_update_status(self, 
async_client: AsyncGradientAI) -> None @parametrize async def test_method_update_status_with_all_params(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.update_status( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', - body_uuid='"12345678-1234-1234-1234-123456789012"', + path_uuid="uuid", + body_uuid="uuid", visibility="VISIBILITY_UNKNOWN", ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @@ -570,7 +568,7 @@ async def test_method_update_status_with_all_params(self, async_client: AsyncGra @parametrize async def test_raw_response_update_status(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.with_raw_response.update_status( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert response.is_closed is True @@ -582,7 +580,7 @@ async def test_raw_response_update_status(self, async_client: AsyncGradientAI) - @parametrize async def test_streaming_response_update_status(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.with_streaming_response.update_status( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/test_droplets.py b/tests/api_resources/test_droplets.py deleted file mode 100644 index e6d3b17d..00000000 --- a/tests/api_resources/test_droplets.py +++ /dev/null @@ -1,912 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types import ( - DropletListResponse, - DropletCreateResponse, - DropletRetrieveResponse, - DropletListKernelsResponse, - DropletListFirewallsResponse, - DropletListNeighborsResponse, - DropletListSnapshotsResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestDroplets: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_overload_1(self, client: GradientAI) -> None: - droplet = client.droplets.create( - image="ubuntu-20-04-x64", - name="example.com", - size="s-1vcpu-1gb", - ) - assert_matches_type(DropletCreateResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None: - droplet = client.droplets.create( - image="ubuntu-20-04-x64", - name="example.com", - size="s-1vcpu-1gb", - backup_policy={ - "hour": 0, - "plan": "daily", - "weekday": "SUN", - }, - backups=True, - ipv6=True, - monitoring=True, - private_networking=True, - region="nyc3", - ssh_keys=[289794, "3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - tags=["env:prod", "web"], - user_data="#cloud-config\nruncmd:\n - touch /test.txt\n", - volumes=["12e97116-7280-11ed-b3d0-0a58ac146812"], - vpc_uuid="760e09ef-dc84-11e8-981e-3cfdfeaae000", - with_droplet_agent=True, - ) - assert_matches_type(DropletCreateResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_overload_1(self, client: GradientAI) -> None: - response = client.droplets.with_raw_response.create( - image="ubuntu-20-04-x64", - name="example.com", - size="s-1vcpu-1gb", - ) - - assert response.is_closed is True - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = response.parse() - assert_matches_type(DropletCreateResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: - with client.droplets.with_streaming_response.create( - image="ubuntu-20-04-x64", - name="example.com", - size="s-1vcpu-1gb", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = response.parse() - assert_matches_type(DropletCreateResponse, droplet, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_create_overload_2(self, client: GradientAI) -> None: - droplet = client.droplets.create( - image="ubuntu-20-04-x64", - names=["sub-01.example.com", "sub-02.example.com"], - size="s-1vcpu-1gb", - ) - assert_matches_type(DropletCreateResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: - droplet = client.droplets.create( - image="ubuntu-20-04-x64", - names=["sub-01.example.com", "sub-02.example.com"], - size="s-1vcpu-1gb", - backup_policy={ - "hour": 0, - "plan": "daily", - "weekday": "SUN", - }, - backups=True, - ipv6=True, - monitoring=True, - private_networking=True, - region="nyc3", - ssh_keys=[289794, "3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - tags=["env:prod", "web"], - user_data="#cloud-config\nruncmd:\n - touch /test.txt\n", - volumes=["12e97116-7280-11ed-b3d0-0a58ac146812"], - vpc_uuid="760e09ef-dc84-11e8-981e-3cfdfeaae000", - with_droplet_agent=True, - ) - assert_matches_type(DropletCreateResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_overload_2(self, client: GradientAI) -> None: - response = client.droplets.with_raw_response.create( - 
image="ubuntu-20-04-x64", - names=["sub-01.example.com", "sub-02.example.com"], - size="s-1vcpu-1gb", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = response.parse() - assert_matches_type(DropletCreateResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: - with client.droplets.with_streaming_response.create( - image="ubuntu-20-04-x64", - names=["sub-01.example.com", "sub-02.example.com"], - size="s-1vcpu-1gb", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = response.parse() - assert_matches_type(DropletCreateResponse, droplet, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - droplet = client.droplets.retrieve( - 1, - ) - assert_matches_type(DropletRetrieveResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.droplets.with_raw_response.retrieve( - 1, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = response.parse() - assert_matches_type(DropletRetrieveResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.droplets.with_streaming_response.retrieve( - 1, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = response.parse() - assert_matches_type(DropletRetrieveResponse, droplet, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def 
test_method_list(self, client: GradientAI) -> None: - droplet = client.droplets.list() - assert_matches_type(DropletListResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - droplet = client.droplets.list( - name="name", - page=1, - per_page=1, - tag_name="tag_name", - type="droplets", - ) - assert_matches_type(DropletListResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.droplets.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = response.parse() - assert_matches_type(DropletListResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.droplets.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = response.parse() - assert_matches_type(DropletListResponse, droplet, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: GradientAI) -> None: - droplet = client.droplets.delete( - 1, - ) - assert droplet is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.droplets.with_raw_response.delete( - 1, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = response.parse() - assert droplet is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.droplets.with_streaming_response.delete( - 1, - ) as response: - assert not response.is_closed - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = response.parse() - assert droplet is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete_by_tag(self, client: GradientAI) -> None: - droplet = client.droplets.delete_by_tag( - tag_name="tag_name", - ) - assert droplet is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete_by_tag(self, client: GradientAI) -> None: - response = client.droplets.with_raw_response.delete_by_tag( - tag_name="tag_name", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = response.parse() - assert droplet is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete_by_tag(self, client: GradientAI) -> None: - with client.droplets.with_streaming_response.delete_by_tag( - tag_name="tag_name", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = response.parse() - assert droplet is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_list_firewalls(self, client: GradientAI) -> None: - droplet = client.droplets.list_firewalls( - droplet_id=3164444, - ) - assert_matches_type(DropletListFirewallsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_firewalls_with_all_params(self, client: GradientAI) -> None: - droplet = client.droplets.list_firewalls( - droplet_id=3164444, - page=1, - per_page=1, - ) - assert_matches_type(DropletListFirewallsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list_firewalls(self, client: GradientAI) -> None: - response = client.droplets.with_raw_response.list_firewalls( - droplet_id=3164444, - ) - - assert response.is_closed is True - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = response.parse() - assert_matches_type(DropletListFirewallsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list_firewalls(self, client: GradientAI) -> None: - with client.droplets.with_streaming_response.list_firewalls( - droplet_id=3164444, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = response.parse() - assert_matches_type(DropletListFirewallsResponse, droplet, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_list_kernels(self, client: GradientAI) -> None: - droplet = client.droplets.list_kernels( - droplet_id=3164444, - ) - assert_matches_type(DropletListKernelsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_kernels_with_all_params(self, client: GradientAI) -> None: - droplet = client.droplets.list_kernels( - droplet_id=3164444, - page=1, - per_page=1, - ) - assert_matches_type(DropletListKernelsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list_kernels(self, client: GradientAI) -> None: - response = client.droplets.with_raw_response.list_kernels( - droplet_id=3164444, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = response.parse() - assert_matches_type(DropletListKernelsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list_kernels(self, client: GradientAI) -> None: - with client.droplets.with_streaming_response.list_kernels( - droplet_id=3164444, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = response.parse() - 
assert_matches_type(DropletListKernelsResponse, droplet, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_list_neighbors(self, client: GradientAI) -> None: - droplet = client.droplets.list_neighbors( - 1, - ) - assert_matches_type(DropletListNeighborsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list_neighbors(self, client: GradientAI) -> None: - response = client.droplets.with_raw_response.list_neighbors( - 1, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = response.parse() - assert_matches_type(DropletListNeighborsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list_neighbors(self, client: GradientAI) -> None: - with client.droplets.with_streaming_response.list_neighbors( - 1, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = response.parse() - assert_matches_type(DropletListNeighborsResponse, droplet, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_list_snapshots(self, client: GradientAI) -> None: - droplet = client.droplets.list_snapshots( - droplet_id=3164444, - ) - assert_matches_type(DropletListSnapshotsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_snapshots_with_all_params(self, client: GradientAI) -> None: - droplet = client.droplets.list_snapshots( - droplet_id=3164444, - page=1, - per_page=1, - ) - assert_matches_type(DropletListSnapshotsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list_snapshots(self, client: GradientAI) -> None: - response = client.droplets.with_raw_response.list_snapshots( - droplet_id=3164444, - ) 
- - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = response.parse() - assert_matches_type(DropletListSnapshotsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list_snapshots(self, client: GradientAI) -> None: - with client.droplets.with_streaming_response.list_snapshots( - droplet_id=3164444, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = response.parse() - assert_matches_type(DropletListSnapshotsResponse, droplet, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncDroplets: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.droplets.create( - image="ubuntu-20-04-x64", - name="example.com", - size="s-1vcpu-1gb", - ) - assert_matches_type(DropletCreateResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.droplets.create( - image="ubuntu-20-04-x64", - name="example.com", - size="s-1vcpu-1gb", - backup_policy={ - "hour": 0, - "plan": "daily", - "weekday": "SUN", - }, - backups=True, - ipv6=True, - monitoring=True, - private_networking=True, - region="nyc3", - ssh_keys=[289794, "3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - tags=["env:prod", "web"], - user_data="#cloud-config\nruncmd:\n - touch /test.txt\n", - volumes=["12e97116-7280-11ed-b3d0-0a58ac146812"], - vpc_uuid="760e09ef-dc84-11e8-981e-3cfdfeaae000", - with_droplet_agent=True, - ) - assert_matches_type(DropletCreateResponse, droplet, 
path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.with_raw_response.create( - image="ubuntu-20-04-x64", - name="example.com", - size="s-1vcpu-1gb", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = await response.parse() - assert_matches_type(DropletCreateResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.with_streaming_response.create( - image="ubuntu-20-04-x64", - name="example.com", - size="s-1vcpu-1gb", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = await response.parse() - assert_matches_type(DropletCreateResponse, droplet, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.droplets.create( - image="ubuntu-20-04-x64", - names=["sub-01.example.com", "sub-02.example.com"], - size="s-1vcpu-1gb", - ) - assert_matches_type(DropletCreateResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.droplets.create( - image="ubuntu-20-04-x64", - names=["sub-01.example.com", "sub-02.example.com"], - size="s-1vcpu-1gb", - backup_policy={ - "hour": 0, - "plan": "daily", - "weekday": "SUN", - }, - backups=True, - ipv6=True, - monitoring=True, - private_networking=True, - region="nyc3", - ssh_keys=[289794, "3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], - tags=["env:prod", "web"], - 
user_data="#cloud-config\nruncmd:\n - touch /test.txt\n", - volumes=["12e97116-7280-11ed-b3d0-0a58ac146812"], - vpc_uuid="760e09ef-dc84-11e8-981e-3cfdfeaae000", - with_droplet_agent=True, - ) - assert_matches_type(DropletCreateResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.with_raw_response.create( - image="ubuntu-20-04-x64", - names=["sub-01.example.com", "sub-02.example.com"], - size="s-1vcpu-1gb", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = await response.parse() - assert_matches_type(DropletCreateResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.with_streaming_response.create( - image="ubuntu-20-04-x64", - names=["sub-01.example.com", "sub-02.example.com"], - size="s-1vcpu-1gb", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = await response.parse() - assert_matches_type(DropletCreateResponse, droplet, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.droplets.retrieve( - 1, - ) - assert_matches_type(DropletRetrieveResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.with_raw_response.retrieve( - 1, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = await response.parse() - 
assert_matches_type(DropletRetrieveResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.with_streaming_response.retrieve( - 1, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = await response.parse() - assert_matches_type(DropletRetrieveResponse, droplet, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.droplets.list() - assert_matches_type(DropletListResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.droplets.list( - name="name", - page=1, - per_page=1, - tag_name="tag_name", - type="droplets", - ) - assert_matches_type(DropletListResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = await response.parse() - assert_matches_type(DropletListResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = await response.parse() - assert_matches_type(DropletListResponse, droplet, path=["response"]) - - assert cast(Any, response.is_closed) is True 
- - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.droplets.delete( - 1, - ) - assert droplet is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.with_raw_response.delete( - 1, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = await response.parse() - assert droplet is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.with_streaming_response.delete( - 1, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = await response.parse() - assert droplet is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete_by_tag(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.droplets.delete_by_tag( - tag_name="tag_name", - ) - assert droplet is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete_by_tag(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.with_raw_response.delete_by_tag( - tag_name="tag_name", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = await response.parse() - assert droplet is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete_by_tag(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.with_streaming_response.delete_by_tag( - tag_name="tag_name", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - 
droplet = await response.parse() - assert droplet is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_list_firewalls(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.droplets.list_firewalls( - droplet_id=3164444, - ) - assert_matches_type(DropletListFirewallsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_firewalls_with_all_params(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.droplets.list_firewalls( - droplet_id=3164444, - page=1, - per_page=1, - ) - assert_matches_type(DropletListFirewallsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list_firewalls(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.with_raw_response.list_firewalls( - droplet_id=3164444, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = await response.parse() - assert_matches_type(DropletListFirewallsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list_firewalls(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.with_streaming_response.list_firewalls( - droplet_id=3164444, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = await response.parse() - assert_matches_type(DropletListFirewallsResponse, droplet, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_list_kernels(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.droplets.list_kernels( - droplet_id=3164444, - ) - assert_matches_type(DropletListKernelsResponse, droplet, path=["response"]) - - 
@pytest.mark.skip() - @parametrize - async def test_method_list_kernels_with_all_params(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.droplets.list_kernels( - droplet_id=3164444, - page=1, - per_page=1, - ) - assert_matches_type(DropletListKernelsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list_kernels(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.with_raw_response.list_kernels( - droplet_id=3164444, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = await response.parse() - assert_matches_type(DropletListKernelsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list_kernels(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.with_streaming_response.list_kernels( - droplet_id=3164444, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = await response.parse() - assert_matches_type(DropletListKernelsResponse, droplet, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_list_neighbors(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.droplets.list_neighbors( - 1, - ) - assert_matches_type(DropletListNeighborsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list_neighbors(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.with_raw_response.list_neighbors( - 1, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = await response.parse() - assert_matches_type(DropletListNeighborsResponse, droplet, path=["response"]) - 
- @pytest.mark.skip() - @parametrize - async def test_streaming_response_list_neighbors(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.with_streaming_response.list_neighbors( - 1, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = await response.parse() - assert_matches_type(DropletListNeighborsResponse, droplet, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_list_snapshots(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.droplets.list_snapshots( - droplet_id=3164444, - ) - assert_matches_type(DropletListSnapshotsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_snapshots_with_all_params(self, async_client: AsyncGradientAI) -> None: - droplet = await async_client.droplets.list_snapshots( - droplet_id=3164444, - page=1, - per_page=1, - ) - assert_matches_type(DropletListSnapshotsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list_snapshots(self, async_client: AsyncGradientAI) -> None: - response = await async_client.droplets.with_raw_response.list_snapshots( - droplet_id=3164444, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - droplet = await response.parse() - assert_matches_type(DropletListSnapshotsResponse, droplet, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list_snapshots(self, async_client: AsyncGradientAI) -> None: - async with async_client.droplets.with_streaming_response.list_snapshots( - droplet_id=3164444, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - droplet = await response.parse() - 
assert_matches_type(DropletListSnapshotsResponse, droplet, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_firewalls.py b/tests/api_resources/test_firewalls.py deleted file mode 100644 index f41d1b9f..00000000 --- a/tests/api_resources/test_firewalls.py +++ /dev/null @@ -1,617 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types import ( - FirewallListResponse, - FirewallCreateResponse, - FirewallUpdateResponse, - FirewallRetrieveResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestFirewalls: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: GradientAI) -> None: - firewall = client.firewalls.create() - assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: - firewall = client.firewalls.create( - body={ - "droplet_ids": [8043964], - "inbound_rules": [ - { - "ports": "80", - "protocol": "tcp", - "sources": { - "addresses": ["1.2.3.4", "18.0.0.0/8"], - "droplet_ids": [8043964], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["base-image", "prod"], - }, - }, - { - "ports": "22", - "protocol": "tcp", - "sources": { - "addresses": ["18.0.0.0/8"], - "droplet_ids": [8043964], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["gateway"], - }, - }, - ], - "name": "firewall", 
- "outbound_rules": [ - { - "destinations": { - "addresses": ["0.0.0.0/0", "::/0"], - "droplet_ids": [8043964], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["base-image", "prod"], - }, - "ports": "80", - "protocol": "tcp", - } - ], - "tags": ["base-image", "prod"], - }, - ) - assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: - response = client.firewalls.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - firewall = response.parse() - assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: - with client.firewalls.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - firewall = response.parse() - assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - firewall = client.firewalls.retrieve( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.firewalls.with_raw_response.retrieve( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - firewall = response.parse() - assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) - - @pytest.mark.skip() - 
@parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.firewalls.with_streaming_response.retrieve( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - firewall = response.parse() - assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): - client.firewalls.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: GradientAI) -> None: - firewall = client.firewalls.update( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - firewall={"name": "frontend-firewall"}, - ) - assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: - firewall = client.firewalls.update( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - firewall={ - "droplet_ids": [8043964], - "inbound_rules": [ - { - "ports": "8080", - "protocol": "tcp", - "sources": { - "addresses": ["1.2.3.4", "18.0.0.0/8"], - "droplet_ids": [8043964], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["base-image", "prod"], - }, - }, - { - "ports": "22", - "protocol": "tcp", - "sources": { - "addresses": ["18.0.0.0/8"], - "droplet_ids": [8043964], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["gateway"], - }, - }, - ], - "name": "frontend-firewall", - "outbound_rules": [ - { - "destinations": { - "addresses": 
["0.0.0.0/0", "::/0"], - "droplet_ids": [8043964], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["base-image", "prod"], - }, - "ports": "8080", - "protocol": "tcp", - } - ], - "tags": ["frontend"], - }, - ) - assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: - response = client.firewalls.with_raw_response.update( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - firewall={"name": "frontend-firewall"}, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - firewall = response.parse() - assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: - with client.firewalls.with_streaming_response.update( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - firewall={"name": "frontend-firewall"}, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - firewall = response.parse() - assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): - client.firewalls.with_raw_response.update( - firewall_id="", - firewall={"name": "frontend-firewall"}, - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - firewall = client.firewalls.list() - assert_matches_type(FirewallListResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - def 
test_method_list_with_all_params(self, client: GradientAI) -> None: - firewall = client.firewalls.list( - page=1, - per_page=1, - ) - assert_matches_type(FirewallListResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.firewalls.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - firewall = response.parse() - assert_matches_type(FirewallListResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.firewalls.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - firewall = response.parse() - assert_matches_type(FirewallListResponse, firewall, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: GradientAI) -> None: - firewall = client.firewalls.delete( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert firewall is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.firewalls.with_raw_response.delete( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - firewall = response.parse() - assert firewall is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.firewalls.with_streaming_response.delete( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - firewall = response.parse() - assert firewall is None - - 
assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): - client.firewalls.with_raw_response.delete( - "", - ) - - -class TestAsyncFirewalls: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: - firewall = await async_client.firewalls.create() - assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - firewall = await async_client.firewalls.create( - body={ - "droplet_ids": [8043964], - "inbound_rules": [ - { - "ports": "80", - "protocol": "tcp", - "sources": { - "addresses": ["1.2.3.4", "18.0.0.0/8"], - "droplet_ids": [8043964], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["base-image", "prod"], - }, - }, - { - "ports": "22", - "protocol": "tcp", - "sources": { - "addresses": ["18.0.0.0/8"], - "droplet_ids": [8043964], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["gateway"], - }, - }, - ], - "name": "firewall", - "outbound_rules": [ - { - "destinations": { - "addresses": ["0.0.0.0/0", "::/0"], - "droplet_ids": [8043964], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["base-image", "prod"], - }, - "ports": "80", - "protocol": "tcp", - } - ], - "tags": ["base-image", "prod"], - }, - ) - assert_matches_type(FirewallCreateResponse, 
firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.firewalls.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - firewall = await response.parse() - assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.firewalls.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - firewall = await response.parse() - assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - firewall = await async_client.firewalls.retrieve( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.firewalls.with_raw_response.retrieve( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - firewall = await response.parse() - assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.firewalls.with_streaming_response.retrieve( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - - firewall = await response.parse() - assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): - await async_client.firewalls.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: - firewall = await async_client.firewalls.update( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - firewall={"name": "frontend-firewall"}, - ) - assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - firewall = await async_client.firewalls.update( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - firewall={ - "droplet_ids": [8043964], - "inbound_rules": [ - { - "ports": "8080", - "protocol": "tcp", - "sources": { - "addresses": ["1.2.3.4", "18.0.0.0/8"], - "droplet_ids": [8043964], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["base-image", "prod"], - }, - }, - { - "ports": "22", - "protocol": "tcp", - "sources": { - "addresses": ["18.0.0.0/8"], - "droplet_ids": [8043964], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["gateway"], - }, - }, - ], - "name": "frontend-firewall", - "outbound_rules": [ - { - "destinations": { - "addresses": ["0.0.0.0/0", "::/0"], - "droplet_ids": [8043964], - "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], - "load_balancer_uids": 
["4de7ac8b-495b-4884-9a69-1050c6793cd6"], - "tags": ["base-image", "prod"], - }, - "ports": "8080", - "protocol": "tcp", - } - ], - "tags": ["frontend"], - }, - ) - assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.firewalls.with_raw_response.update( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - firewall={"name": "frontend-firewall"}, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - firewall = await response.parse() - assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.firewalls.with_streaming_response.update( - firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", - firewall={"name": "frontend-firewall"}, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - firewall = await response.parse() - assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): - await async_client.firewalls.with_raw_response.update( - firewall_id="", - firewall={"name": "frontend-firewall"}, - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - firewall = await async_client.firewalls.list() - assert_matches_type(FirewallListResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def 
test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - firewall = await async_client.firewalls.list( - page=1, - per_page=1, - ) - assert_matches_type(FirewallListResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.firewalls.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - firewall = await response.parse() - assert_matches_type(FirewallListResponse, firewall, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.firewalls.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - firewall = await response.parse() - assert_matches_type(FirewallListResponse, firewall, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - firewall = await async_client.firewalls.delete( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert firewall is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.firewalls.with_raw_response.delete( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - firewall = await response.parse() - assert firewall is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.firewalls.with_streaming_response.delete( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - 
) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - firewall = await response.parse() - assert firewall is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): - await async_client.firewalls.with_raw_response.delete( - "", - ) diff --git a/tests/api_resources/test_floating_ips.py b/tests/api_resources/test_floating_ips.py deleted file mode 100644 index c9119fb9..00000000 --- a/tests/api_resources/test_floating_ips.py +++ /dev/null @@ -1,424 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types import ( - FloatingIPListResponse, - FloatingIPCreateResponse, - FloatingIPRetrieveResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestFloatingIPs: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_overload_1(self, client: GradientAI) -> None: - floating_ip = client.floating_ips.create( - droplet_id=2457247, - ) - assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_overload_1(self, client: GradientAI) -> None: - response = client.floating_ips.with_raw_response.create( - droplet_id=2457247, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - floating_ip = response.parse() - 
assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: - with client.floating_ips.with_streaming_response.create( - droplet_id=2457247, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - floating_ip = response.parse() - assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_create_overload_2(self, client: GradientAI) -> None: - floating_ip = client.floating_ips.create( - region="nyc3", - ) - assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: - floating_ip = client.floating_ips.create( - region="nyc3", - project_id="746c6152-2fa2-11ed-92d3-27aaa54e4988", - ) - assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_overload_2(self, client: GradientAI) -> None: - response = client.floating_ips.with_raw_response.create( - region="nyc3", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - floating_ip = response.parse() - assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: - with client.floating_ips.with_streaming_response.create( - region="nyc3", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - floating_ip = response.parse() - assert_matches_type(FloatingIPCreateResponse, floating_ip, 
path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - floating_ip = client.floating_ips.retrieve( - "192.168.1.1", - ) - assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.floating_ips.with_raw_response.retrieve( - "192.168.1.1", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - floating_ip = response.parse() - assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.floating_ips.with_streaming_response.retrieve( - "192.168.1.1", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - floating_ip = response.parse() - assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): - client.floating_ips.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - floating_ip = client.floating_ips.list() - assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - floating_ip = client.floating_ips.list( - page=1, - per_page=1, - ) - assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - 
@parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.floating_ips.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - floating_ip = response.parse() - assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.floating_ips.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - floating_ip = response.parse() - assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: GradientAI) -> None: - floating_ip = client.floating_ips.delete( - "192.168.1.1", - ) - assert floating_ip is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.floating_ips.with_raw_response.delete( - "192.168.1.1", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - floating_ip = response.parse() - assert floating_ip is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.floating_ips.with_streaming_response.delete( - "192.168.1.1", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - floating_ip = response.parse() - assert floating_ip is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): - 
client.floating_ips.with_raw_response.delete( - "", - ) - - -class TestAsyncFloatingIPs: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: - floating_ip = await async_client.floating_ips.create( - droplet_id=2457247, - ) - assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: - response = await async_client.floating_ips.with_raw_response.create( - droplet_id=2457247, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - floating_ip = await response.parse() - assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: - async with async_client.floating_ips.with_streaming_response.create( - droplet_id=2457247, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - floating_ip = await response.parse() - assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: - floating_ip = await async_client.floating_ips.create( - region="nyc3", - ) - assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: - floating_ip = await 
async_client.floating_ips.create( - region="nyc3", - project_id="746c6152-2fa2-11ed-92d3-27aaa54e4988", - ) - assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: - response = await async_client.floating_ips.with_raw_response.create( - region="nyc3", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - floating_ip = await response.parse() - assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: - async with async_client.floating_ips.with_streaming_response.create( - region="nyc3", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - floating_ip = await response.parse() - assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - floating_ip = await async_client.floating_ips.retrieve( - "192.168.1.1", - ) - assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.floating_ips.with_raw_response.retrieve( - "192.168.1.1", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - floating_ip = await response.parse() - assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, 
async_client: AsyncGradientAI) -> None: - async with async_client.floating_ips.with_streaming_response.retrieve( - "192.168.1.1", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - floating_ip = await response.parse() - assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): - await async_client.floating_ips.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - floating_ip = await async_client.floating_ips.list() - assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - floating_ip = await async_client.floating_ips.list( - page=1, - per_page=1, - ) - assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.floating_ips.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - floating_ip = await response.parse() - assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.floating_ips.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" 
- - floating_ip = await response.parse() - assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - floating_ip = await async_client.floating_ips.delete( - "192.168.1.1", - ) - assert floating_ip is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.floating_ips.with_raw_response.delete( - "192.168.1.1", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - floating_ip = await response.parse() - assert floating_ip is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.floating_ips.with_streaming_response.delete( - "192.168.1.1", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - floating_ip = await response.parse() - assert floating_ip is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): - await async_client.floating_ips.with_raw_response.delete( - "", - ) diff --git a/tests/api_resources/test_images.py b/tests/api_resources/test_images.py deleted file mode 100644 index 1da2a301..00000000 --- a/tests/api_resources/test_images.py +++ /dev/null @@ -1,417 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types import ( - ImageListResponse, - ImageCreateResponse, - ImageUpdateResponse, - ImageRetrieveResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestImages: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: GradientAI) -> None: - image = client.images.create() - assert_matches_type(ImageCreateResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: - image = client.images.create( - description=" ", - distribution="Ubuntu", - name="Nifty New Snapshot", - region="nyc3", - tags=["base-image", "prod"], - url="http://cloud-images.ubuntu.com/minimal/releases/bionic/release/ubuntu-18.04-minimal-cloudimg-amd64.img", - ) - assert_matches_type(ImageCreateResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: - response = client.images.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - image = response.parse() - assert_matches_type(ImageCreateResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: - with client.images.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - image = response.parse() - assert_matches_type(ImageCreateResponse, image, path=["response"]) - - assert cast(Any, response.is_closed) is True - - 
@pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - image = client.images.retrieve( - 0, - ) - assert_matches_type(ImageRetrieveResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.images.with_raw_response.retrieve( - 0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - image = response.parse() - assert_matches_type(ImageRetrieveResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.images.with_streaming_response.retrieve( - 0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - image = response.parse() - assert_matches_type(ImageRetrieveResponse, image, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_update(self, client: GradientAI) -> None: - image = client.images.update( - image_id=62137902, - ) - assert_matches_type(ImageUpdateResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params(self, client: GradientAI) -> None: - image = client.images.update( - image_id=62137902, - description=" ", - distribution="Ubuntu", - name="Nifty New Snapshot", - ) - assert_matches_type(ImageUpdateResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update(self, client: GradientAI) -> None: - response = client.images.with_raw_response.update( - image_id=62137902, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - image = response.parse() - assert_matches_type(ImageUpdateResponse, image, path=["response"]) - - @pytest.mark.skip() - 
@parametrize - def test_streaming_response_update(self, client: GradientAI) -> None: - with client.images.with_streaming_response.update( - image_id=62137902, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - image = response.parse() - assert_matches_type(ImageUpdateResponse, image, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - image = client.images.list() - assert_matches_type(ImageListResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - image = client.images.list( - page=1, - per_page=1, - private=True, - tag_name="tag_name", - type="application", - ) - assert_matches_type(ImageListResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.images.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - image = response.parse() - assert_matches_type(ImageListResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.images.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - image = response.parse() - assert_matches_type(ImageListResponse, image, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: GradientAI) -> None: - image = client.images.delete( - 0, - ) - assert image is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: - response = 
client.images.with_raw_response.delete( - 0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - image = response.parse() - assert image is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.images.with_streaming_response.delete( - 0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - image = response.parse() - assert image is None - - assert cast(Any, response.is_closed) is True - - -class TestAsyncImages: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: - image = await async_client.images.create() - assert_matches_type(ImageCreateResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - image = await async_client.images.create( - description=" ", - distribution="Ubuntu", - name="Nifty New Snapshot", - region="nyc3", - tags=["base-image", "prod"], - url="http://cloud-images.ubuntu.com/minimal/releases/bionic/release/ubuntu-18.04-minimal-cloudimg-amd64.img", - ) - assert_matches_type(ImageCreateResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.images.with_raw_response.create() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - image = await response.parse() - assert_matches_type(ImageCreateResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, 
async_client: AsyncGradientAI) -> None: - async with async_client.images.with_streaming_response.create() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - image = await response.parse() - assert_matches_type(ImageCreateResponse, image, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - image = await async_client.images.retrieve( - 0, - ) - assert_matches_type(ImageRetrieveResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.images.with_raw_response.retrieve( - 0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - image = await response.parse() - assert_matches_type(ImageRetrieveResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.images.with_streaming_response.retrieve( - 0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - image = await response.parse() - assert_matches_type(ImageRetrieveResponse, image, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_update(self, async_client: AsyncGradientAI) -> None: - image = await async_client.images.update( - image_id=62137902, - ) - assert_matches_type(ImageUpdateResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: - image = await async_client.images.update( - image_id=62137902, - description=" ", - 
distribution="Ubuntu", - name="Nifty New Snapshot", - ) - assert_matches_type(ImageUpdateResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: - response = await async_client.images.with_raw_response.update( - image_id=62137902, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - image = await response.parse() - assert_matches_type(ImageUpdateResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: - async with async_client.images.with_streaming_response.update( - image_id=62137902, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - image = await response.parse() - assert_matches_type(ImageUpdateResponse, image, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - image = await async_client.images.list() - assert_matches_type(ImageListResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - image = await async_client.images.list( - page=1, - per_page=1, - private=True, - tag_name="tag_name", - type="application", - ) - assert_matches_type(ImageListResponse, image, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.images.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - image = await response.parse() - assert_matches_type(ImageListResponse, image, path=["response"]) - - 
@pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.images.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - image = await response.parse() - assert_matches_type(ImageListResponse, image, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - image = await async_client.images.delete( - 0, - ) - assert image is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.images.with_raw_response.delete( - 0, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - image = await response.parse() - assert image is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.images.with_streaming_response.delete( - 0, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - image = await response.parse() - assert image is None - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index 8a331b52..508820ce 100644 --- a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -33,42 +33,42 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: knowledge_base = client.knowledge_bases.create( - database_id='"12345678-1234-1234-1234-123456789012"', + database_id="database_id", datasources=[ { 
"aws_data_source": { - "bucket_name": '"example name"', - "item_path": '"example string"', - "key_id": '"123e4567-e89b-12d3-a456-426614174000"', - "region": '"example string"', - "secret_key": '"example string"', + "bucket_name": "bucket_name", + "item_path": "item_path", + "key_id": "key_id", + "region": "region", + "secret_key": "secret_key", }, - "bucket_name": '"example name"', - "bucket_region": '"example string"', + "bucket_name": "bucket_name", + "bucket_region": "bucket_region", "file_upload_data_source": { - "original_file_name": '"example name"', - "size_in_bytes": '"12345"', - "stored_object_key": '"example string"', + "original_file_name": "original_file_name", + "size_in_bytes": "size_in_bytes", + "stored_object_key": "stored_object_key", }, - "item_path": '"example string"', + "item_path": "item_path", "spaces_data_source": { - "bucket_name": '"example name"', - "item_path": '"example string"', - "region": '"example string"', + "bucket_name": "bucket_name", + "item_path": "item_path", + "region": "region", }, "web_crawler_data_source": { - "base_url": '"example string"', + "base_url": "base_url", "crawling_option": "UNKNOWN", "embed_media": True, }, } ], - embedding_model_uuid='"12345678-1234-1234-1234-123456789012"', - name='"My Knowledge Base"', - project_id='"12345678-1234-1234-1234-123456789012"', - region='"tor1"', - tags=["example string"], - vpc_uuid='"12345678-1234-1234-1234-123456789012"', + embedding_model_uuid="embedding_model_uuid", + name="name", + project_id="project_id", + region="region", + tags=["string"], + vpc_uuid="vpc_uuid", ) assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) @@ -140,7 +140,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: knowledge_base = client.knowledge_bases.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) 
assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) @@ -148,13 +148,13 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: knowledge_base = client.knowledge_bases.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', - database_id='"12345678-1234-1234-1234-123456789012"', - embedding_model_uuid='"12345678-1234-1234-1234-123456789012"', - name='"My Knowledge Base"', - project_id='"12345678-1234-1234-1234-123456789012"', - tags=["example string"], - body_uuid='"12345678-1234-1234-1234-123456789012"', + path_uuid="uuid", + database_id="database_id", + embedding_model_uuid="embedding_model_uuid", + name="name", + project_id="project_id", + tags=["string"], + body_uuid="uuid", ) assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) @@ -162,7 +162,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.knowledge_bases.with_raw_response.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert response.is_closed is True @@ -174,7 +174,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.knowledge_bases.with_streaming_response.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -287,42 +287,42 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: knowledge_base = await async_client.knowledge_bases.create( - database_id='"12345678-1234-1234-1234-123456789012"', + 
database_id="database_id", datasources=[ { "aws_data_source": { - "bucket_name": '"example name"', - "item_path": '"example string"', - "key_id": '"123e4567-e89b-12d3-a456-426614174000"', - "region": '"example string"', - "secret_key": '"example string"', + "bucket_name": "bucket_name", + "item_path": "item_path", + "key_id": "key_id", + "region": "region", + "secret_key": "secret_key", }, - "bucket_name": '"example name"', - "bucket_region": '"example string"', + "bucket_name": "bucket_name", + "bucket_region": "bucket_region", "file_upload_data_source": { - "original_file_name": '"example name"', - "size_in_bytes": '"12345"', - "stored_object_key": '"example string"', + "original_file_name": "original_file_name", + "size_in_bytes": "size_in_bytes", + "stored_object_key": "stored_object_key", }, - "item_path": '"example string"', + "item_path": "item_path", "spaces_data_source": { - "bucket_name": '"example name"', - "item_path": '"example string"', - "region": '"example string"', + "bucket_name": "bucket_name", + "item_path": "item_path", + "region": "region", }, "web_crawler_data_source": { - "base_url": '"example string"', + "base_url": "base_url", "crawling_option": "UNKNOWN", "embed_media": True, }, } ], - embedding_model_uuid='"12345678-1234-1234-1234-123456789012"', - name='"My Knowledge Base"', - project_id='"12345678-1234-1234-1234-123456789012"', - region='"tor1"', - tags=["example string"], - vpc_uuid='"12345678-1234-1234-1234-123456789012"', + embedding_model_uuid="embedding_model_uuid", + name="name", + project_id="project_id", + region="region", + tags=["string"], + vpc_uuid="vpc_uuid", ) assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) @@ -394,7 +394,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: knowledge_base = await async_client.knowledge_bases.update( - 
path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) @@ -402,13 +402,13 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: knowledge_base = await async_client.knowledge_bases.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', - database_id='"12345678-1234-1234-1234-123456789012"', - embedding_model_uuid='"12345678-1234-1234-1234-123456789012"', - name='"My Knowledge Base"', - project_id='"12345678-1234-1234-1234-123456789012"', - tags=["example string"], - body_uuid='"12345678-1234-1234-1234-123456789012"', + path_uuid="uuid", + database_id="database_id", + embedding_model_uuid="embedding_model_uuid", + name="name", + project_id="project_id", + tags=["string"], + body_uuid="uuid", ) assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) @@ -416,7 +416,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.knowledge_bases.with_raw_response.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) assert response.is_closed is True @@ -428,7 +428,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.knowledge_bases.with_streaming_response.update( - path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_uuid="uuid", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/test_load_balancers.py b/tests/api_resources/test_load_balancers.py deleted file mode 100644 index 
6beb02fc..00000000 --- a/tests/api_resources/test_load_balancers.py +++ /dev/null @@ -1,1443 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types import ( - LoadBalancerListResponse, - LoadBalancerCreateResponse, - LoadBalancerUpdateResponse, - LoadBalancerRetrieveResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestLoadBalancers: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_overload_1(self, client: GradientAI) -> None: - load_balancer = client.load_balancers.create( - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None: - load_balancer = client.load_balancers.create( - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", - "tls_passthrough": False, - } - ], - algorithm="round_robin", - disable_lets_encrypt_dns_records=True, - domains=[ - { - "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", - "is_managed": True, - "name": "example.com", - } - ], - droplet_ids=[3164444, 3164445], - enable_backend_keepalive=True, - enable_proxy_protocol=True, - firewall={ - "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], - "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], - }, - glb_settings={ - "cdn": {"is_enabled": True}, - 
"failover_threshold": 50, - "region_priorities": { - "nyc1": 1, - "fra1": 2, - "sgp1": 3, - }, - "target_port": 80, - "target_protocol": "http", - }, - health_check={ - "check_interval_seconds": 10, - "healthy_threshold": 3, - "path": "/", - "port": 80, - "protocol": "http", - "response_timeout_seconds": 5, - "unhealthy_threshold": 5, - }, - http_idle_timeout_seconds=90, - name="example-lb-01", - network="EXTERNAL", - network_stack="IPV4", - project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - redirect_http_to_https=True, - region="nyc3", - size="lb-small", - size_unit=3, - sticky_sessions={ - "cookie_name": "DO-LB", - "cookie_ttl_seconds": 300, - "type": "cookies", - }, - target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], - tls_cipher_policy="STRONG", - type="REGIONAL", - vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", - ) - assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_overload_1(self, client: GradientAI) -> None: - response = client.load_balancers.with_raw_response.create( - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - load_balancer = response.parse() - assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: - with client.load_balancers.with_streaming_response.create( - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - load_balancer = 
response.parse() - assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_create_overload_2(self, client: GradientAI) -> None: - load_balancer = client.load_balancers.create( - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: - load_balancer = client.load_balancers.create( - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", - "tls_passthrough": False, - } - ], - algorithm="round_robin", - disable_lets_encrypt_dns_records=True, - domains=[ - { - "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", - "is_managed": True, - "name": "example.com", - } - ], - enable_backend_keepalive=True, - enable_proxy_protocol=True, - firewall={ - "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], - "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], - }, - glb_settings={ - "cdn": {"is_enabled": True}, - "failover_threshold": 50, - "region_priorities": { - "nyc1": 1, - "fra1": 2, - "sgp1": 3, - }, - "target_port": 80, - "target_protocol": "http", - }, - health_check={ - "check_interval_seconds": 10, - "healthy_threshold": 3, - "path": "/", - "port": 80, - "protocol": "http", - "response_timeout_seconds": 5, - "unhealthy_threshold": 5, - }, - http_idle_timeout_seconds=90, - name="example-lb-01", - network="EXTERNAL", - network_stack="IPV4", - project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - redirect_http_to_https=True, - region="nyc3", - size="lb-small", - size_unit=3, - sticky_sessions={ - "cookie_name": "DO-LB", - "cookie_ttl_seconds": 
300, - "type": "cookies", - }, - tag="prod:web", - target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], - tls_cipher_policy="STRONG", - type="REGIONAL", - vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", - ) - assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_overload_2(self, client: GradientAI) -> None: - response = client.load_balancers.with_raw_response.create( - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - load_balancer = response.parse() - assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: - with client.load_balancers.with_streaming_response.create( - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - load_balancer = response.parse() - assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - load_balancer = client.load_balancers.retrieve( - "lb_id", - ) - assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.load_balancers.with_raw_response.retrieve( - "lb_id", - ) - - assert response.is_closed is True - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - load_balancer = response.parse() - assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.load_balancers.with_streaming_response.retrieve( - "lb_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - load_balancer = response.parse() - assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): - client.load_balancers.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_update_overload_1(self, client: GradientAI) -> None: - load_balancer = client.load_balancers.update( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params_overload_1(self, client: GradientAI) -> None: - load_balancer = client.load_balancers.update( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", - "tls_passthrough": False, - } - ], - algorithm="round_robin", - disable_lets_encrypt_dns_records=True, - domains=[ - { - "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", - "is_managed": True, - "name": "example.com", 
- } - ], - droplet_ids=[3164444, 3164445], - enable_backend_keepalive=True, - enable_proxy_protocol=True, - firewall={ - "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], - "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], - }, - glb_settings={ - "cdn": {"is_enabled": True}, - "failover_threshold": 50, - "region_priorities": { - "nyc1": 1, - "fra1": 2, - "sgp1": 3, - }, - "target_port": 80, - "target_protocol": "http", - }, - health_check={ - "check_interval_seconds": 10, - "healthy_threshold": 3, - "path": "/", - "port": 80, - "protocol": "http", - "response_timeout_seconds": 5, - "unhealthy_threshold": 5, - }, - http_idle_timeout_seconds=90, - name="example-lb-01", - network="EXTERNAL", - network_stack="IPV4", - project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - redirect_http_to_https=True, - region="nyc3", - size="lb-small", - size_unit=3, - sticky_sessions={ - "cookie_name": "DO-LB", - "cookie_ttl_seconds": 300, - "type": "cookies", - }, - target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], - tls_cipher_policy="STRONG", - type="REGIONAL", - vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", - ) - assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update_overload_1(self, client: GradientAI) -> None: - response = client.load_balancers.with_raw_response.update( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - load_balancer = response.parse() - assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update_overload_1(self, client: GradientAI) -> None: - with 
client.load_balancers.with_streaming_response.update( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - load_balancer = response.parse() - assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update_overload_1(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): - client.load_balancers.with_raw_response.update( - lb_id="", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - @pytest.mark.skip() - @parametrize - def test_method_update_overload_2(self, client: GradientAI) -> None: - load_balancer = client.load_balancers.update( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_update_with_all_params_overload_2(self, client: GradientAI) -> None: - load_balancer = client.load_balancers.update( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", - "tls_passthrough": False, - } - ], - algorithm="round_robin", - disable_lets_encrypt_dns_records=True, - domains=[ - { - "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", - "is_managed": True, - "name": 
"example.com", - } - ], - enable_backend_keepalive=True, - enable_proxy_protocol=True, - firewall={ - "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], - "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], - }, - glb_settings={ - "cdn": {"is_enabled": True}, - "failover_threshold": 50, - "region_priorities": { - "nyc1": 1, - "fra1": 2, - "sgp1": 3, - }, - "target_port": 80, - "target_protocol": "http", - }, - health_check={ - "check_interval_seconds": 10, - "healthy_threshold": 3, - "path": "/", - "port": 80, - "protocol": "http", - "response_timeout_seconds": 5, - "unhealthy_threshold": 5, - }, - http_idle_timeout_seconds=90, - name="example-lb-01", - network="EXTERNAL", - network_stack="IPV4", - project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - redirect_http_to_https=True, - region="nyc3", - size="lb-small", - size_unit=3, - sticky_sessions={ - "cookie_name": "DO-LB", - "cookie_ttl_seconds": 300, - "type": "cookies", - }, - tag="prod:web", - target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], - tls_cipher_policy="STRONG", - type="REGIONAL", - vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", - ) - assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_update_overload_2(self, client: GradientAI) -> None: - response = client.load_balancers.with_raw_response.update( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - load_balancer = response.parse() - assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_update_overload_2(self, client: GradientAI) -> None: - with 
client.load_balancers.with_streaming_response.update( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - load_balancer = response.parse() - assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_update_overload_2(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): - client.load_balancers.with_raw_response.update( - lb_id="", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - load_balancer = client.load_balancers.list() - assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - load_balancer = client.load_balancers.list( - page=1, - per_page=1, - ) - assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.load_balancers.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - load_balancer = response.parse() - assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with 
client.load_balancers.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - load_balancer = response.parse() - assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: GradientAI) -> None: - load_balancer = client.load_balancers.delete( - "lb_id", - ) - assert load_balancer is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.load_balancers.with_raw_response.delete( - "lb_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - load_balancer = response.parse() - assert load_balancer is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.load_balancers.with_streaming_response.delete( - "lb_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - load_balancer = response.parse() - assert load_balancer is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): - client.load_balancers.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_delete_cache(self, client: GradientAI) -> None: - load_balancer = client.load_balancers.delete_cache( - "lb_id", - ) - assert load_balancer is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete_cache(self, client: GradientAI) -> None: - response = client.load_balancers.with_raw_response.delete_cache( - "lb_id", - ) - - assert 
response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - load_balancer = response.parse() - assert load_balancer is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete_cache(self, client: GradientAI) -> None: - with client.load_balancers.with_streaming_response.delete_cache( - "lb_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - load_balancer = response.parse() - assert load_balancer is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete_cache(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): - client.load_balancers.with_raw_response.delete_cache( - "", - ) - - -class TestAsyncLoadBalancers: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: - load_balancer = await async_client.load_balancers.create( - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: - load_balancer = await async_client.load_balancers.create( - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", - "tls_passthrough": False, - } - ], - algorithm="round_robin", - disable_lets_encrypt_dns_records=True, - domains=[ - { - 
"certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", - "is_managed": True, - "name": "example.com", - } - ], - droplet_ids=[3164444, 3164445], - enable_backend_keepalive=True, - enable_proxy_protocol=True, - firewall={ - "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], - "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], - }, - glb_settings={ - "cdn": {"is_enabled": True}, - "failover_threshold": 50, - "region_priorities": { - "nyc1": 1, - "fra1": 2, - "sgp1": 3, - }, - "target_port": 80, - "target_protocol": "http", - }, - health_check={ - "check_interval_seconds": 10, - "healthy_threshold": 3, - "path": "/", - "port": 80, - "protocol": "http", - "response_timeout_seconds": 5, - "unhealthy_threshold": 5, - }, - http_idle_timeout_seconds=90, - name="example-lb-01", - network="EXTERNAL", - network_stack="IPV4", - project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - redirect_http_to_https=True, - region="nyc3", - size="lb-small", - size_unit=3, - sticky_sessions={ - "cookie_name": "DO-LB", - "cookie_ttl_seconds": 300, - "type": "cookies", - }, - target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], - tls_cipher_policy="STRONG", - type="REGIONAL", - vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", - ) - assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: - response = await async_client.load_balancers.with_raw_response.create( - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - load_balancer = await response.parse() - assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def 
test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: - async with async_client.load_balancers.with_streaming_response.create( - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - load_balancer = await response.parse() - assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: - load_balancer = await async_client.load_balancers.create( - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: - load_balancer = await async_client.load_balancers.create( - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", - "tls_passthrough": False, - } - ], - algorithm="round_robin", - disable_lets_encrypt_dns_records=True, - domains=[ - { - "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", - "is_managed": True, - "name": "example.com", - } - ], - enable_backend_keepalive=True, - enable_proxy_protocol=True, - firewall={ - "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], - "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], - }, - glb_settings={ - "cdn": {"is_enabled": True}, - "failover_threshold": 50, - "region_priorities": { - "nyc1": 1, - "fra1": 2, - "sgp1": 3, - }, - "target_port": 80, - "target_protocol": "http", - 
}, - health_check={ - "check_interval_seconds": 10, - "healthy_threshold": 3, - "path": "/", - "port": 80, - "protocol": "http", - "response_timeout_seconds": 5, - "unhealthy_threshold": 5, - }, - http_idle_timeout_seconds=90, - name="example-lb-01", - network="EXTERNAL", - network_stack="IPV4", - project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - redirect_http_to_https=True, - region="nyc3", - size="lb-small", - size_unit=3, - sticky_sessions={ - "cookie_name": "DO-LB", - "cookie_ttl_seconds": 300, - "type": "cookies", - }, - tag="prod:web", - target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], - tls_cipher_policy="STRONG", - type="REGIONAL", - vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", - ) - assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: - response = await async_client.load_balancers.with_raw_response.create( - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - load_balancer = await response.parse() - assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: - async with async_client.load_balancers.with_streaming_response.create( - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - load_balancer = await response.parse() - assert_matches_type(LoadBalancerCreateResponse, 
load_balancer, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - load_balancer = await async_client.load_balancers.retrieve( - "lb_id", - ) - assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.load_balancers.with_raw_response.retrieve( - "lb_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - load_balancer = await response.parse() - assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.load_balancers.with_streaming_response.retrieve( - "lb_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - load_balancer = await response.parse() - assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): - await async_client.load_balancers.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update_overload_1(self, async_client: AsyncGradientAI) -> None: - load_balancer = await async_client.load_balancers.update( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - 
], - ) - assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: - load_balancer = await async_client.load_balancers.update( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", - "tls_passthrough": False, - } - ], - algorithm="round_robin", - disable_lets_encrypt_dns_records=True, - domains=[ - { - "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", - "is_managed": True, - "name": "example.com", - } - ], - droplet_ids=[3164444, 3164445], - enable_backend_keepalive=True, - enable_proxy_protocol=True, - firewall={ - "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], - "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], - }, - glb_settings={ - "cdn": {"is_enabled": True}, - "failover_threshold": 50, - "region_priorities": { - "nyc1": 1, - "fra1": 2, - "sgp1": 3, - }, - "target_port": 80, - "target_protocol": "http", - }, - health_check={ - "check_interval_seconds": 10, - "healthy_threshold": 3, - "path": "/", - "port": 80, - "protocol": "http", - "response_timeout_seconds": 5, - "unhealthy_threshold": 5, - }, - http_idle_timeout_seconds=90, - name="example-lb-01", - network="EXTERNAL", - network_stack="IPV4", - project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - redirect_http_to_https=True, - region="nyc3", - size="lb-small", - size_unit=3, - sticky_sessions={ - "cookie_name": "DO-LB", - "cookie_ttl_seconds": 300, - "type": "cookies", - }, - target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], - tls_cipher_policy="STRONG", - type="REGIONAL", - vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", - ) - assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - 
- @pytest.mark.skip() - @parametrize - async def test_raw_response_update_overload_1(self, async_client: AsyncGradientAI) -> None: - response = await async_client.load_balancers.with_raw_response.update( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - load_balancer = await response.parse() - assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update_overload_1(self, async_client: AsyncGradientAI) -> None: - async with async_client.load_balancers.with_streaming_response.update( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - load_balancer = await response.parse() - assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update_overload_1(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): - await async_client.load_balancers.with_raw_response.update( - lb_id="", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - @pytest.mark.skip() - @parametrize - async def test_method_update_overload_2(self, async_client: AsyncGradientAI) -> None: - load_balancer = await async_client.load_balancers.update( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", 
- forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_update_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: - load_balancer = await async_client.load_balancers.update( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", - "tls_passthrough": False, - } - ], - algorithm="round_robin", - disable_lets_encrypt_dns_records=True, - domains=[ - { - "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", - "is_managed": True, - "name": "example.com", - } - ], - enable_backend_keepalive=True, - enable_proxy_protocol=True, - firewall={ - "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], - "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], - }, - glb_settings={ - "cdn": {"is_enabled": True}, - "failover_threshold": 50, - "region_priorities": { - "nyc1": 1, - "fra1": 2, - "sgp1": 3, - }, - "target_port": 80, - "target_protocol": "http", - }, - health_check={ - "check_interval_seconds": 10, - "healthy_threshold": 3, - "path": "/", - "port": 80, - "protocol": "http", - "response_timeout_seconds": 5, - "unhealthy_threshold": 5, - }, - http_idle_timeout_seconds=90, - name="example-lb-01", - network="EXTERNAL", - network_stack="IPV4", - project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - redirect_http_to_https=True, - region="nyc3", - size="lb-small", - size_unit=3, - sticky_sessions={ - "cookie_name": "DO-LB", - "cookie_ttl_seconds": 300, - "type": "cookies", - }, - tag="prod:web", - target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], - tls_cipher_policy="STRONG", - type="REGIONAL", - 
vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", - ) - assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_update_overload_2(self, async_client: AsyncGradientAI) -> None: - response = await async_client.load_balancers.with_raw_response.update( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - load_balancer = await response.parse() - assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_update_overload_2(self, async_client: AsyncGradientAI) -> None: - async with async_client.load_balancers.with_streaming_response.update( - lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - load_balancer = await response.parse() - assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_update_overload_2(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): - await async_client.load_balancers.with_raw_response.update( - lb_id="", - forwarding_rules=[ - { - "entry_port": 443, - "entry_protocol": "https", - "target_port": 80, - "target_protocol": "http", - } - ], - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: 
AsyncGradientAI) -> None: - load_balancer = await async_client.load_balancers.list() - assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - load_balancer = await async_client.load_balancers.list( - page=1, - per_page=1, - ) - assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.load_balancers.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - load_balancer = await response.parse() - assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.load_balancers.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - load_balancer = await response.parse() - assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - load_balancer = await async_client.load_balancers.delete( - "lb_id", - ) - assert load_balancer is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.load_balancers.with_raw_response.delete( - "lb_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - load_balancer = await response.parse() - assert load_balancer is 
None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.load_balancers.with_streaming_response.delete( - "lb_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - load_balancer = await response.parse() - assert load_balancer is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): - await async_client.load_balancers.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_delete_cache(self, async_client: AsyncGradientAI) -> None: - load_balancer = await async_client.load_balancers.delete_cache( - "lb_id", - ) - assert load_balancer is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete_cache(self, async_client: AsyncGradientAI) -> None: - response = await async_client.load_balancers.with_raw_response.delete_cache( - "lb_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - load_balancer = await response.parse() - assert load_balancer is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete_cache(self, async_client: AsyncGradientAI) -> None: - async with async_client.load_balancers.with_streaming_response.delete_cache( - "lb_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - load_balancer = await response.parse() - assert load_balancer is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete_cache(self, async_client: AsyncGradientAI) -> None: - 
with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): - await async_client.load_balancers.with_raw_response.delete_cache( - "", - ) diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index fe837973..5e119f71 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ModelListResponse, ModelRetrieveResponse +from gradientai.types import ModelListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -19,50 +19,19 @@ class TestModels: @pytest.mark.skip() @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - model = client.models.retrieve( - "llama3-8b-instruct", - ) - assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + def test_method_list(self, client: GradientAI) -> None: + model = client.models.list() + assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.models.with_raw_response.retrieve( - "llama3-8b-instruct", + def test_method_list_with_all_params(self, client: GradientAI) -> None: + model = client.models.list( + page=0, + per_page=0, + public_only=True, + usecases=["MODEL_USECASE_UNKNOWN"], ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = response.parse() - assert_matches_type(ModelRetrieveResponse, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.models.with_streaming_response.retrieve( - "llama3-8b-instruct", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - 
model = response.parse() - assert_matches_type(ModelRetrieveResponse, model, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): - client.models.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - model = client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() @@ -95,50 +64,19 @@ class TestAsyncModels: @pytest.mark.skip() @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - model = await async_client.models.retrieve( - "llama3-8b-instruct", - ) - assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + model = await async_client.models.list() + assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.models.with_raw_response.retrieve( - "llama3-8b-instruct", + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + model = await async_client.models.list( + page=0, + per_page=0, + public_only=True, + usecases=["MODEL_USECASE_UNKNOWN"], ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - model = await response.parse() - assert_matches_type(ModelRetrieveResponse, model, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.models.with_streaming_response.retrieve( - "llama3-8b-instruct", - ) as response: - assert not 
response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - model = await response.parse() - assert_matches_type(ModelRetrieveResponse, model, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): - await async_client.models.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - model = await async_client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py index 4f232293..8e25617f 100644 --- a/tests/api_resources/test_regions.py +++ b/tests/api_resources/test_regions.py @@ -27,8 +27,8 @@ def test_method_list(self, client: GradientAI) -> None: @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: region = client.regions.list( - page=1, - per_page=1, + serves_batch=True, + serves_inference=True, ) assert_matches_type(RegionListResponse, region, path=["response"]) @@ -70,8 +70,8 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: region = await async_client.regions.list( - page=1, - per_page=1, + serves_batch=True, + serves_inference=True, ) assert_matches_type(RegionListResponse, region, path=["response"]) diff --git a/tests/api_resources/test_sizes.py b/tests/api_resources/test_sizes.py deleted file mode 100644 index ea03f23b..00000000 --- a/tests/api_resources/test_sizes.py +++ /dev/null @@ -1,98 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types import SizeListResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestSizes: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - size = client.sizes.list() - assert_matches_type(SizeListResponse, size, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - size = client.sizes.list( - page=1, - per_page=1, - ) - assert_matches_type(SizeListResponse, size, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.sizes.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - size = response.parse() - assert_matches_type(SizeListResponse, size, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.sizes.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - size = response.parse() - assert_matches_type(SizeListResponse, size, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncSizes: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - size = await async_client.sizes.list() - 
assert_matches_type(SizeListResponse, size, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - size = await async_client.sizes.list( - page=1, - per_page=1, - ) - assert_matches_type(SizeListResponse, size, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.sizes.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - size = await response.parse() - assert_matches_type(SizeListResponse, size, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.sizes.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - size = await response.parse() - assert_matches_type(SizeListResponse, size, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_snapshots.py b/tests/api_resources/test_snapshots.py deleted file mode 100644 index 5535fef1..00000000 --- a/tests/api_resources/test_snapshots.py +++ /dev/null @@ -1,236 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types import SnapshotListResponse, SnapshotRetrieveResponse - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestSnapshots: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - snapshot = client.snapshots.retrieve( - 6372321, - ) - assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.snapshots.with_raw_response.retrieve( - 6372321, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - snapshot = response.parse() - assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.snapshots.with_streaming_response.retrieve( - 6372321, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - snapshot = response.parse() - assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - snapshot = client.snapshots.list() - assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - snapshot = client.snapshots.list( - page=1, - per_page=1, - resource_type="droplet", - ) - 
assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.snapshots.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - snapshot = response.parse() - assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.snapshots.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - snapshot = response.parse() - assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: GradientAI) -> None: - snapshot = client.snapshots.delete( - 6372321, - ) - assert snapshot is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.snapshots.with_raw_response.delete( - 6372321, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - snapshot = response.parse() - assert snapshot is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.snapshots.with_streaming_response.delete( - 6372321, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - snapshot = response.parse() - assert snapshot is None - - assert cast(Any, response.is_closed) is True - - -class TestAsyncSnapshots: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - 
) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - snapshot = await async_client.snapshots.retrieve( - 6372321, - ) - assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.snapshots.with_raw_response.retrieve( - 6372321, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - snapshot = await response.parse() - assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.snapshots.with_streaming_response.retrieve( - 6372321, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - snapshot = await response.parse() - assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - snapshot = await async_client.snapshots.list() - assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - snapshot = await async_client.snapshots.list( - page=1, - per_page=1, - resource_type="droplet", - ) - assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.snapshots.with_raw_response.list() - - assert response.is_closed is True - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - snapshot = await response.parse() - assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.snapshots.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - snapshot = await response.parse() - assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - snapshot = await async_client.snapshots.delete( - 6372321, - ) - assert snapshot is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.snapshots.with_raw_response.delete( - 6372321, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - snapshot = await response.parse() - assert snapshot is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.snapshots.with_streaming_response.delete( - 6372321, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - snapshot = await response.parse() - assert snapshot is None - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_volumes.py b/tests/api_resources/test_volumes.py deleted file mode 100644 index 1848bdfb..00000000 --- a/tests/api_resources/test_volumes.py +++ /dev/null @@ -1,568 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types import ( - VolumeListResponse, - VolumeCreateResponse, - VolumeRetrieveResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestVolumes: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_overload_1(self, client: GradientAI) -> None: - volume = client.volumes.create( - name="example", - region="nyc3", - size_gigabytes=10, - ) - assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None: - volume = client.volumes.create( - name="example", - region="nyc3", - size_gigabytes=10, - description="Block store for examples", - filesystem_label="example", - filesystem_type="ext4", - snapshot_id="b0798135-fb76-11eb-946a-0a58ac146f33", - tags=["base-image", "prod"], - ) - assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_overload_1(self, client: GradientAI) -> None: - response = client.volumes.with_raw_response.create( - name="example", - region="nyc3", - size_gigabytes=10, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - volume = response.parse() - assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: - with client.volumes.with_streaming_response.create( - name="example", - region="nyc3", - size_gigabytes=10, - ) as response: - assert not response.is_closed - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - - volume = response.parse() - assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_create_overload_2(self, client: GradientAI) -> None: - volume = client.volumes.create( - name="example", - region="nyc3", - size_gigabytes=10, - ) - assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: - volume = client.volumes.create( - name="example", - region="nyc3", - size_gigabytes=10, - description="Block store for examples", - filesystem_label="example", - filesystem_type="ext4", - snapshot_id="b0798135-fb76-11eb-946a-0a58ac146f33", - tags=["base-image", "prod"], - ) - assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create_overload_2(self, client: GradientAI) -> None: - response = client.volumes.with_raw_response.create( - name="example", - region="nyc3", - size_gigabytes=10, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - volume = response.parse() - assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: - with client.volumes.with_streaming_response.create( - name="example", - region="nyc3", - size_gigabytes=10, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - volume = response.parse() - assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: 
GradientAI) -> None: - volume = client.volumes.retrieve( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.volumes.with_raw_response.retrieve( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - volume = response.parse() - assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.volumes.with_streaming_response.retrieve( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - volume = response.parse() - assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - client.volumes.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - volume = client.volumes.list() - assert_matches_type(VolumeListResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - volume = client.volumes.list( - name="name", - page=1, - per_page=1, - region="nyc3", - ) - assert_matches_type(VolumeListResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.volumes.with_raw_response.list() - - assert 
response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - volume = response.parse() - assert_matches_type(VolumeListResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.volumes.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - volume = response.parse() - assert_matches_type(VolumeListResponse, volume, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: GradientAI) -> None: - volume = client.volumes.delete( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert volume is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.volumes.with_raw_response.delete( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - volume = response.parse() - assert volume is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.volumes.with_streaming_response.delete( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - volume = response.parse() - assert volume is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - client.volumes.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - def test_method_delete_by_name(self, client: GradientAI) -> 
None: - volume = client.volumes.delete_by_name() - assert volume is None - - @pytest.mark.skip() - @parametrize - def test_method_delete_by_name_with_all_params(self, client: GradientAI) -> None: - volume = client.volumes.delete_by_name( - name="name", - region="nyc3", - ) - assert volume is None - - @pytest.mark.skip() - @parametrize - def test_raw_response_delete_by_name(self, client: GradientAI) -> None: - response = client.volumes.with_raw_response.delete_by_name() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - volume = response.parse() - assert volume is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete_by_name(self, client: GradientAI) -> None: - with client.volumes.with_streaming_response.delete_by_name() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - volume = response.parse() - assert volume is None - - assert cast(Any, response.is_closed) is True - - -class TestAsyncVolumes: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: - volume = await async_client.volumes.create( - name="example", - region="nyc3", - size_gigabytes=10, - ) - assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: - volume = await async_client.volumes.create( - name="example", - region="nyc3", - size_gigabytes=10, - description="Block store for examples", - filesystem_label="example", - filesystem_type="ext4", - snapshot_id="b0798135-fb76-11eb-946a-0a58ac146f33", - tags=["base-image", "prod"], - ) - 
assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: - response = await async_client.volumes.with_raw_response.create( - name="example", - region="nyc3", - size_gigabytes=10, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - volume = await response.parse() - assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: - async with async_client.volumes.with_streaming_response.create( - name="example", - region="nyc3", - size_gigabytes=10, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - volume = await response.parse() - assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: - volume = await async_client.volumes.create( - name="example", - region="nyc3", - size_gigabytes=10, - ) - assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: - volume = await async_client.volumes.create( - name="example", - region="nyc3", - size_gigabytes=10, - description="Block store for examples", - filesystem_label="example", - filesystem_type="ext4", - snapshot_id="b0798135-fb76-11eb-946a-0a58ac146f33", - tags=["base-image", "prod"], - ) - assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_create_overload_2(self, 
async_client: AsyncGradientAI) -> None: - response = await async_client.volumes.with_raw_response.create( - name="example", - region="nyc3", - size_gigabytes=10, - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - volume = await response.parse() - assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: - async with async_client.volumes.with_streaming_response.create( - name="example", - region="nyc3", - size_gigabytes=10, - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - volume = await response.parse() - assert_matches_type(VolumeCreateResponse, volume, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - volume = await async_client.volumes.retrieve( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.volumes.with_raw_response.retrieve( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - volume = await response.parse() - assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.volumes.with_streaming_response.retrieve( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - - volume = await response.parse() - assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - await async_client.volumes.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - volume = await async_client.volumes.list() - assert_matches_type(VolumeListResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - volume = await async_client.volumes.list( - name="name", - page=1, - per_page=1, - region="nyc3", - ) - assert_matches_type(VolumeListResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.volumes.with_raw_response.list() - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - volume = await response.parse() - assert_matches_type(VolumeListResponse, volume, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.volumes.with_streaming_response.list() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - volume = await response.parse() - assert_matches_type(VolumeListResponse, volume, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, 
async_client: AsyncGradientAI) -> None: - volume = await async_client.volumes.delete( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - assert volume is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.volumes.with_raw_response.delete( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - volume = await response.parse() - assert volume is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.volumes.with_streaming_response.delete( - "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - volume = await response.parse() - assert volume is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - await async_client.volumes.with_raw_response.delete( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_delete_by_name(self, async_client: AsyncGradientAI) -> None: - volume = await async_client.volumes.delete_by_name() - assert volume is None - - @pytest.mark.skip() - @parametrize - async def test_method_delete_by_name_with_all_params(self, async_client: AsyncGradientAI) -> None: - volume = await async_client.volumes.delete_by_name( - name="name", - region="nyc3", - ) - assert volume is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete_by_name(self, async_client: AsyncGradientAI) -> None: - response = await async_client.volumes.with_raw_response.delete_by_name() - - assert 
response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - volume = await response.parse() - assert volume is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete_by_name(self, async_client: AsyncGradientAI) -> None: - async with async_client.volumes.with_streaming_response.delete_by_name() as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - volume = await response.parse() - assert volume is None - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/volumes/__init__.py b/tests/api_resources/volumes/__init__.py deleted file mode 100644 index fd8019a9..00000000 --- a/tests/api_resources/volumes/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/volumes/test_actions.py b/tests/api_resources/volumes/test_actions.py deleted file mode 100644 index e13b3a58..00000000 --- a/tests/api_resources/volumes/test_actions.py +++ /dev/null @@ -1,825 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types.volumes import ( - ActionListResponse, - ActionRetrieveResponse, - ActionInitiateByIDResponse, - ActionInitiateByNameResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestActions: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - action = client.volumes.actions.retrieve( - action_id=36804636, - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve_with_all_params(self, client: GradientAI) -> None: - action = client.volumes.actions.retrieve( - action_id=36804636, - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - page=1, - per_page=1, - ) - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.volumes.actions.with_raw_response.retrieve( - action_id=36804636, - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.volumes.actions.with_streaming_response.retrieve( - action_id=36804636, - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) as response: - assert not response.is_closed - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - client.volumes.actions.with_raw_response.retrieve( - action_id=36804636, - volume_id="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_list(self, client: GradientAI) -> None: - action = client.volumes.actions.list( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - action = client.volumes.actions.list( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - page=1, - per_page=1, - ) - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.volumes.actions.with_raw_response.list( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.volumes.actions.with_streaming_response.list( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionListResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) 
is True - - @pytest.mark.skip() - @parametrize - def test_path_params_list(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - client.volumes.actions.with_raw_response.list( - volume_id="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_initiate_by_id_overload_1(self, client: GradientAI) -> None: - action = client.volumes.actions.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - droplet_id=11612190, - type="attach", - ) - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_initiate_by_id_with_all_params_overload_1(self, client: GradientAI) -> None: - action = client.volumes.actions.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - droplet_id=11612190, - type="attach", - page=1, - per_page=1, - region="nyc3", - tags=["base-image", "prod"], - ) - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_initiate_by_id_overload_1(self, client: GradientAI) -> None: - response = client.volumes.actions.with_raw_response.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - droplet_id=11612190, - type="attach", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_initiate_by_id_overload_1(self, client: GradientAI) -> None: - with client.volumes.actions.with_streaming_response.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - droplet_id=11612190, - type="attach", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = 
response.parse() - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_initiate_by_id_overload_1(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - client.volumes.actions.with_raw_response.initiate_by_id( - volume_id="", - droplet_id=11612190, - type="attach", - ) - - @pytest.mark.skip() - @parametrize - def test_method_initiate_by_id_overload_2(self, client: GradientAI) -> None: - action = client.volumes.actions.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - droplet_id=11612190, - type="attach", - ) - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_initiate_by_id_with_all_params_overload_2(self, client: GradientAI) -> None: - action = client.volumes.actions.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - droplet_id=11612190, - type="attach", - page=1, - per_page=1, - region="nyc3", - ) - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_initiate_by_id_overload_2(self, client: GradientAI) -> None: - response = client.volumes.actions.with_raw_response.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - droplet_id=11612190, - type="attach", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_initiate_by_id_overload_2(self, client: GradientAI) -> None: - with client.volumes.actions.with_streaming_response.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - 
droplet_id=11612190, - type="attach", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_initiate_by_id_overload_2(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - client.volumes.actions.with_raw_response.initiate_by_id( - volume_id="", - droplet_id=11612190, - type="attach", - ) - - @pytest.mark.skip() - @parametrize - def test_method_initiate_by_id_overload_3(self, client: GradientAI) -> None: - action = client.volumes.actions.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - size_gigabytes=16384, - type="attach", - ) - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_initiate_by_id_with_all_params_overload_3(self, client: GradientAI) -> None: - action = client.volumes.actions.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - size_gigabytes=16384, - type="attach", - page=1, - per_page=1, - region="nyc3", - ) - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_initiate_by_id_overload_3(self, client: GradientAI) -> None: - response = client.volumes.actions.with_raw_response.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - size_gigabytes=16384, - type="attach", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def 
test_streaming_response_initiate_by_id_overload_3(self, client: GradientAI) -> None: - with client.volumes.actions.with_streaming_response.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - size_gigabytes=16384, - type="attach", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_initiate_by_id_overload_3(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - client.volumes.actions.with_raw_response.initiate_by_id( - volume_id="", - size_gigabytes=16384, - type="attach", - ) - - @pytest.mark.skip() - @parametrize - def test_method_initiate_by_name_overload_1(self, client: GradientAI) -> None: - action = client.volumes.actions.initiate_by_name( - droplet_id=11612190, - type="attach", - ) - assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_initiate_by_name_with_all_params_overload_1(self, client: GradientAI) -> None: - action = client.volumes.actions.initiate_by_name( - droplet_id=11612190, - type="attach", - page=1, - per_page=1, - region="nyc3", - tags=["base-image", "prod"], - ) - assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_initiate_by_name_overload_1(self, client: GradientAI) -> None: - response = client.volumes.actions.with_raw_response.initiate_by_name( - droplet_id=11612190, - type="attach", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionInitiateByNameResponse, action, 
path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_initiate_by_name_overload_1(self, client: GradientAI) -> None: - with client.volumes.actions.with_streaming_response.initiate_by_name( - droplet_id=11612190, - type="attach", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_method_initiate_by_name_overload_2(self, client: GradientAI) -> None: - action = client.volumes.actions.initiate_by_name( - droplet_id=11612190, - type="attach", - ) - assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_initiate_by_name_with_all_params_overload_2(self, client: GradientAI) -> None: - action = client.volumes.actions.initiate_by_name( - droplet_id=11612190, - type="attach", - page=1, - per_page=1, - region="nyc3", - ) - assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_initiate_by_name_overload_2(self, client: GradientAI) -> None: - response = client.volumes.actions.with_raw_response.initiate_by_name( - droplet_id=11612190, - type="attach", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = response.parse() - assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_initiate_by_name_overload_2(self, client: GradientAI) -> None: - with client.volumes.actions.with_streaming_response.initiate_by_name( - droplet_id=11612190, - type="attach", - ) as response: - assert not response.is_closed - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = response.parse() - assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - -class TestAsyncActions: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - action = await async_client.volumes.actions.retrieve( - action_id=36804636, - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve_with_all_params(self, async_client: AsyncGradientAI) -> None: - action = await async_client.volumes.actions.retrieve( - action_id=36804636, - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - page=1, - per_page=1, - ) - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.volumes.actions.with_raw_response.retrieve( - action_id=36804636, - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.volumes.actions.with_streaming_response.retrieve( - action_id=36804636, - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" - - action = await response.parse() - assert_matches_type(ActionRetrieveResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - await async_client.volumes.actions.with_raw_response.retrieve( - action_id=36804636, - volume_id="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - action = await async_client.volumes.actions.list( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - action = await async_client.volumes.actions.list( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - page=1, - per_page=1, - ) - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.volumes.actions.with_raw_response.list( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionListResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.volumes.actions.with_streaming_response.list( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - 
assert_matches_type(ActionListResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - await async_client.volumes.actions.with_raw_response.list( - volume_id="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_by_id_overload_1(self, async_client: AsyncGradientAI) -> None: - action = await async_client.volumes.actions.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - droplet_id=11612190, - type="attach", - ) - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_by_id_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: - action = await async_client.volumes.actions.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - droplet_id=11612190, - type="attach", - page=1, - per_page=1, - region="nyc3", - tags=["base-image", "prod"], - ) - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_initiate_by_id_overload_1(self, async_client: AsyncGradientAI) -> None: - response = await async_client.volumes.actions.with_raw_response.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - droplet_id=11612190, - type="attach", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_initiate_by_id_overload_1(self, async_client: AsyncGradientAI) -> None: - async with 
async_client.volumes.actions.with_streaming_response.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - droplet_id=11612190, - type="attach", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_initiate_by_id_overload_1(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - await async_client.volumes.actions.with_raw_response.initiate_by_id( - volume_id="", - droplet_id=11612190, - type="attach", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_by_id_overload_2(self, async_client: AsyncGradientAI) -> None: - action = await async_client.volumes.actions.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - droplet_id=11612190, - type="attach", - ) - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_by_id_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: - action = await async_client.volumes.actions.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - droplet_id=11612190, - type="attach", - page=1, - per_page=1, - region="nyc3", - ) - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_initiate_by_id_overload_2(self, async_client: AsyncGradientAI) -> None: - response = await async_client.volumes.actions.with_raw_response.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - droplet_id=11612190, - type="attach", - ) - - assert response.is_closed is True - assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_initiate_by_id_overload_2(self, async_client: AsyncGradientAI) -> None: - async with async_client.volumes.actions.with_streaming_response.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - droplet_id=11612190, - type="attach", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_initiate_by_id_overload_2(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - await async_client.volumes.actions.with_raw_response.initiate_by_id( - volume_id="", - droplet_id=11612190, - type="attach", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_by_id_overload_3(self, async_client: AsyncGradientAI) -> None: - action = await async_client.volumes.actions.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - size_gigabytes=16384, - type="attach", - ) - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_by_id_with_all_params_overload_3(self, async_client: AsyncGradientAI) -> None: - action = await async_client.volumes.actions.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - size_gigabytes=16384, - type="attach", - page=1, - per_page=1, - region="nyc3", - ) - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - 
async def test_raw_response_initiate_by_id_overload_3(self, async_client: AsyncGradientAI) -> None: - response = await async_client.volumes.actions.with_raw_response.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - size_gigabytes=16384, - type="attach", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_initiate_by_id_overload_3(self, async_client: AsyncGradientAI) -> None: - async with async_client.volumes.actions.with_streaming_response.initiate_by_id( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - size_gigabytes=16384, - type="attach", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_initiate_by_id_overload_3(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - await async_client.volumes.actions.with_raw_response.initiate_by_id( - volume_id="", - size_gigabytes=16384, - type="attach", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_by_name_overload_1(self, async_client: AsyncGradientAI) -> None: - action = await async_client.volumes.actions.initiate_by_name( - droplet_id=11612190, - type="attach", - ) - assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_by_name_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: - action = await 
async_client.volumes.actions.initiate_by_name( - droplet_id=11612190, - type="attach", - page=1, - per_page=1, - region="nyc3", - tags=["base-image", "prod"], - ) - assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_initiate_by_name_overload_1(self, async_client: AsyncGradientAI) -> None: - response = await async_client.volumes.actions.with_raw_response.initiate_by_name( - droplet_id=11612190, - type="attach", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_initiate_by_name_overload_1(self, async_client: AsyncGradientAI) -> None: - async with async_client.volumes.actions.with_streaming_response.initiate_by_name( - droplet_id=11612190, - type="attach", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_by_name_overload_2(self, async_client: AsyncGradientAI) -> None: - action = await async_client.volumes.actions.initiate_by_name( - droplet_id=11612190, - type="attach", - ) - assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_initiate_by_name_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: - action = await async_client.volumes.actions.initiate_by_name( - droplet_id=11612190, - type="attach", - page=1, - per_page=1, - region="nyc3", - ) - assert_matches_type(ActionInitiateByNameResponse, action, 
path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_initiate_by_name_overload_2(self, async_client: AsyncGradientAI) -> None: - response = await async_client.volumes.actions.with_raw_response.initiate_by_name( - droplet_id=11612190, - type="attach", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - action = await response.parse() - assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_initiate_by_name_overload_2(self, async_client: AsyncGradientAI) -> None: - async with async_client.volumes.actions.with_streaming_response.initiate_by_name( - droplet_id=11612190, - type="attach", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - action = await response.parse() - assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) - - assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/volumes/test_snapshots.py b/tests/api_resources/volumes/test_snapshots.py deleted file mode 100644 index 21ef565b..00000000 --- a/tests/api_resources/volumes/test_snapshots.py +++ /dev/null @@ -1,412 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -import os -from typing import Any, cast - -import pytest - -from gradientai import GradientAI, AsyncGradientAI -from tests.utils import assert_matches_type -from gradientai.types.volumes import ( - SnapshotListResponse, - SnapshotCreateResponse, - SnapshotRetrieveResponse, -) - -base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") - - -class TestSnapshots: - parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) - - @pytest.mark.skip() - @parametrize - def test_method_create(self, client: GradientAI) -> None: - snapshot = client.volumes.snapshots.create( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - name="big-data-snapshot1475261774", - ) - assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_create_with_all_params(self, client: GradientAI) -> None: - snapshot = client.volumes.snapshots.create( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - name="big-data-snapshot1475261774", - tags=["base-image", "prod"], - ) - assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_create(self, client: GradientAI) -> None: - response = client.volumes.snapshots.with_raw_response.create( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - name="big-data-snapshot1475261774", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - snapshot = response.parse() - assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_create(self, client: GradientAI) -> None: - with client.volumes.snapshots.with_streaming_response.create( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - name="big-data-snapshot1475261774", - ) as response: - assert not response.is_closed - 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - snapshot = response.parse() - assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_create(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - client.volumes.snapshots.with_raw_response.create( - volume_id="", - name="big-data-snapshot1475261774", - ) - - @pytest.mark.skip() - @parametrize - def test_method_retrieve(self, client: GradientAI) -> None: - snapshot = client.volumes.snapshots.retrieve( - "snapshot_id", - ) - assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_retrieve(self, client: GradientAI) -> None: - response = client.volumes.snapshots.with_raw_response.retrieve( - "snapshot_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - snapshot = response.parse() - assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_retrieve(self, client: GradientAI) -> None: - with client.volumes.snapshots.with_streaming_response.retrieve( - "snapshot_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - snapshot = response.parse() - assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_retrieve(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"): - client.volumes.snapshots.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - 
@parametrize - def test_method_list(self, client: GradientAI) -> None: - snapshot = client.volumes.snapshots.list( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) - assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - snapshot = client.volumes.snapshots.list( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - page=1, - per_page=1, - ) - assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_raw_response_list(self, client: GradientAI) -> None: - response = client.volumes.snapshots.with_raw_response.list( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - snapshot = response.parse() - assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - def test_streaming_response_list(self, client: GradientAI) -> None: - with client.volumes.snapshots.with_streaming_response.list( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - snapshot = response.parse() - assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_list(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - client.volumes.snapshots.with_raw_response.list( - volume_id="", - ) - - @pytest.mark.skip() - @parametrize - def test_method_delete(self, client: GradientAI) -> None: - snapshot = client.volumes.snapshots.delete( - "snapshot_id", - ) - assert snapshot is None - - @pytest.mark.skip() - @parametrize 
- def test_raw_response_delete(self, client: GradientAI) -> None: - response = client.volumes.snapshots.with_raw_response.delete( - "snapshot_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - snapshot = response.parse() - assert snapshot is None - - @pytest.mark.skip() - @parametrize - def test_streaming_response_delete(self, client: GradientAI) -> None: - with client.volumes.snapshots.with_streaming_response.delete( - "snapshot_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - snapshot = response.parse() - assert snapshot is None - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - def test_path_params_delete(self, client: GradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"): - client.volumes.snapshots.with_raw_response.delete( - "", - ) - - -class TestAsyncSnapshots: - parametrize = pytest.mark.parametrize( - "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] - ) - - @pytest.mark.skip() - @parametrize - async def test_method_create(self, async_client: AsyncGradientAI) -> None: - snapshot = await async_client.volumes.snapshots.create( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - name="big-data-snapshot1475261774", - ) - assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: - snapshot = await async_client.volumes.snapshots.create( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - name="big-data-snapshot1475261774", - tags=["base-image", "prod"], - ) - assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def 
test_raw_response_create(self, async_client: AsyncGradientAI) -> None: - response = await async_client.volumes.snapshots.with_raw_response.create( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - name="big-data-snapshot1475261774", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - snapshot = await response.parse() - assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: - async with async_client.volumes.snapshots.with_streaming_response.create( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - name="big-data-snapshot1475261774", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - snapshot = await response.parse() - assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - await async_client.volumes.snapshots.with_raw_response.create( - volume_id="", - name="big-data-snapshot1475261774", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: - snapshot = await async_client.volumes.snapshots.retrieve( - "snapshot_id", - ) - assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: - response = await async_client.volumes.snapshots.with_raw_response.retrieve( - "snapshot_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" - snapshot = await response.parse() - assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: - async with async_client.volumes.snapshots.with_streaming_response.retrieve( - "snapshot_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - snapshot = await response.parse() - assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"): - await async_client.volumes.snapshots.with_raw_response.retrieve( - "", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - snapshot = await async_client.volumes.snapshots.list( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) - assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - snapshot = await async_client.volumes.snapshots.list( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - page=1, - per_page=1, - ) - assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: - response = await async_client.volumes.snapshots.with_raw_response.list( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - snapshot = await response.parse() - 
assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: - async with async_client.volumes.snapshots.with_streaming_response.list( - volume_id="7724db7c-e098-11e5-b522-000f53304e51", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - snapshot = await response.parse() - assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) - - assert cast(Any, response.is_closed) is True - - @pytest.mark.skip() - @parametrize - async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): - await async_client.volumes.snapshots.with_raw_response.list( - volume_id="", - ) - - @pytest.mark.skip() - @parametrize - async def test_method_delete(self, async_client: AsyncGradientAI) -> None: - snapshot = await async_client.volumes.snapshots.delete( - "snapshot_id", - ) - assert snapshot is None - - @pytest.mark.skip() - @parametrize - async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: - response = await async_client.volumes.snapshots.with_raw_response.delete( - "snapshot_id", - ) - - assert response.is_closed is True - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - snapshot = await response.parse() - assert snapshot is None - - @pytest.mark.skip() - @parametrize - async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: - async with async_client.volumes.snapshots.with_streaming_response.delete( - "snapshot_id", - ) as response: - assert not response.is_closed - assert response.http_request.headers.get("X-Stainless-Lang") == "python" - - snapshot = await response.parse() - assert snapshot is None - - assert cast(Any, response.is_closed) is True - - 
@pytest.mark.skip() - @parametrize - async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: - with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"): - await async_client.volumes.snapshots.with_raw_response.delete( - "", - ) From b9e317bac2c541a7eafcfb59a4b19c81e1145075 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Jul 2025 22:25:45 +0000 Subject: [PATCH 03/19] feat(api): add gpu droplets --- .stats.yml | 8 +- api.md | 454 +++- src/gradientai/resources/__init__.py | 14 + src/gradientai/resources/agents/agents.py | 118 +- src/gradientai/resources/agents/api_keys.py | 28 +- .../resources/agents/chat/completions.py | 98 + .../agents/evaluation_metrics/__init__.py | 14 + .../evaluation_metrics/evaluation_metrics.py | 143 ++ .../agents/evaluation_metrics/models.py | 254 ++ .../evaluation_metrics/workspaces/agents.py | 24 +- .../workspaces/workspaces.py | 20 + .../resources/agents/evaluation_runs.py | 42 +- .../resources/agents/evaluation_test_cases.py | 12 +- src/gradientai/resources/agents/functions.py | 60 + src/gradientai/resources/agents/routes.py | 24 + src/gradientai/resources/agents/versions.py | 16 +- src/gradientai/resources/chat/completions.py | 98 + .../resources/gpu_droplets/__init__.py | 187 ++ .../gpu_droplets/account/__init__.py | 33 + .../resources/gpu_droplets/account/account.py | 102 + .../resources/gpu_droplets/account/keys.py | 588 +++++ .../resources/gpu_droplets/actions.py | 2048 +++++++++++++++ .../resources/gpu_droplets/autoscale.py | 967 ++++++++ .../resources/gpu_droplets/backups.py | 460 ++++ .../destroy_with_associated_resources.py | 624 +++++ .../gpu_droplets/firewalls/__init__.py | 61 + .../gpu_droplets/firewalls/droplets.py | 296 +++ .../gpu_droplets/firewalls/firewalls.py | 647 +++++ .../resources/gpu_droplets/firewalls/rules.py | 320 +++ .../resources/gpu_droplets/firewalls/tags.py | 308 +++ 
.../gpu_droplets/floating_ips/__init__.py | 33 + .../gpu_droplets/floating_ips/actions.py | 489 ++++ .../gpu_droplets/floating_ips/floating_ips.py | 635 +++++ .../resources/gpu_droplets/gpu_droplets.py | 2008 +++++++++++++++ .../resources/gpu_droplets/images/__init__.py | 33 + .../resources/gpu_droplets/images/actions.py | 560 +++++ .../resources/gpu_droplets/images/images.py | 867 +++++++ .../gpu_droplets/load_balancers/__init__.py | 47 + .../gpu_droplets/load_balancers/droplets.py | 302 +++ .../load_balancers/forwarding_rules.py | 301 +++ .../load_balancers/load_balancers.py | 2205 +++++++++++++++++ .../resources/gpu_droplets/sizes.py | 199 ++ .../resources/gpu_droplets/snapshots.py | 425 ++++ .../gpu_droplets/volumes/__init__.py | 47 + .../resources/gpu_droplets/volumes/actions.py | 1554 ++++++++++++ .../gpu_droplets/volumes/snapshots.py | 499 ++++ .../resources/gpu_droplets/volumes/volumes.py | 1144 +++++++++ .../resources/inference/api_keys.py | 20 +- .../resources/knowledge_bases/data_sources.py | 24 +- .../knowledge_bases/indexing_jobs.py | 18 +- .../knowledge_bases/knowledge_bases.py | 28 +- src/gradientai/resources/models/models.py | 182 +- .../resources/models/providers/anthropic.py | 36 +- .../resources/models/providers/openai.py | 36 +- src/gradientai/resources/regions.py | 42 +- src/gradientai/types/__init__.py | 40 +- src/gradientai/types/agent_create_params.py | 8 + src/gradientai/types/agent_create_response.py | 1 + src/gradientai/types/agent_delete_response.py | 1 + src/gradientai/types/agent_list_params.py | 6 +- src/gradientai/types/agent_list_response.py | 75 + .../types/agent_retrieve_response.py | 1 + src/gradientai/types/agent_update_params.py | 18 + src/gradientai/types/agent_update_response.py | 1 + .../types/agent_update_status_params.py | 11 + .../types/agent_update_status_response.py | 1 + src/gradientai/types/agents/__init__.py | 7 + .../types/agents/api_evaluation_metric.py | 16 +- .../agents/api_evaluation_metric_result.py | 17 + 
.../types/agents/api_evaluation_prompt.py | 7 + .../types/agents/api_evaluation_run.py | 18 + .../types/agents/api_evaluation_test_case.py | 24 +- .../types/agents/api_key_create_params.py | 2 + .../types/agents/api_key_create_response.py | 1 + .../types/agents/api_key_delete_response.py | 1 + .../types/agents/api_key_list_params.py | 4 +- .../types/agents/api_key_list_response.py | 3 + .../agents/api_key_regenerate_response.py | 1 + .../types/agents/api_key_update_params.py | 3 + .../types/agents/api_key_update_response.py | 1 + .../agents/api_link_knowledge_base_output.py | 1 + .../types/agents/api_star_metric.py | 6 + .../types/agents/api_star_metric_param.py | 6 + .../agents/chat/completion_create_params.py | 113 + .../agents/chat/completion_create_response.py | 54 +- ...reate_file_upload_presigned_urls_params.py | 1 + .../evaluation_metric_list_regions_params.py | 15 + ...evaluation_metric_list_regions_response.py | 29 + .../agents/evaluation_metrics/__init__.py | 2 + .../evaluation_metrics}/model_list_params.py | 8 +- .../evaluation_metrics/model_list_response.py | 21 + .../workspace_create_params.py | 3 + .../workspace_delete_response.py | 1 + .../workspace_list_response.py | 1 + .../workspace_update_params.py | 2 + .../workspaces/agent_list_params.py | 14 +- .../workspaces/agent_list_response.py | 2 + .../workspaces/agent_move_params.py | 2 + .../agents/evaluation_run_create_params.py | 1 + .../evaluation_run_list_results_params.py | 15 + .../evaluation_run_list_results_response.py | 8 + .../evaluation_test_case_list_response.py | 4 + .../evaluation_test_case_update_params.py | 1 + .../types/agents/function_create_params.py | 7 + .../types/agents/function_create_response.py | 1 + .../types/agents/function_delete_response.py | 1 + .../types/agents/function_update_params.py | 8 + .../types/agents/function_update_response.py | 1 + .../agents/knowledge_base_detach_response.py | 1 + .../types/agents/route_add_params.py | 2 + 
.../types/agents/route_add_response.py | 1 + .../types/agents/route_delete_response.py | 2 + .../types/agents/route_update_params.py | 4 + .../types/agents/route_update_response.py | 2 + .../types/agents/route_view_response.py | 1 + .../types/agents/version_list_params.py | 4 +- .../types/agents/version_list_response.py | 51 +- .../types/agents/version_update_params.py | 2 + .../types/agents/version_update_response.py | 1 + src/gradientai/types/api_agent.py | 104 + .../types/api_agent_api_key_info.py | 5 + src/gradientai/types/api_agent_model.py | 14 + .../types/api_anthropic_api_key_info.py | 6 + src/gradientai/types/api_knowledge_base.py | 10 + src/gradientai/types/api_model.py | 10 + src/gradientai/types/api_model_version.py | 3 + .../types/api_openai_api_key_info.py | 7 + src/gradientai/types/api_workspace.py | 10 + .../types/chat/completion_create_params.py | 113 + .../types/chat/completion_create_response.py | 54 +- src/gradientai/types/droplet_backup_policy.py | 28 + .../types/droplet_backup_policy_param.py | 21 + .../types/gpu_droplet_create_params.py | 213 ++ .../types/gpu_droplet_create_response.py | 39 + .../types/gpu_droplet_delete_by_tag_params.py | 12 + .../gpu_droplet_list_firewalls_params.py | 15 + .../gpu_droplet_list_firewalls_response.py | 19 + .../types/gpu_droplet_list_kernels_params.py | 15 + .../gpu_droplet_list_kernels_response.py | 19 + .../gpu_droplet_list_neighbors_response.py | 12 + .../types/gpu_droplet_list_params.py | 34 + .../types/gpu_droplet_list_response.py | 19 + .../gpu_droplet_list_snapshots_params.py | 15 + .../gpu_droplet_list_snapshots_response.py | 53 + .../types/gpu_droplet_retrieve_response.py | 12 + src/gradientai/types/gpu_droplets/__init__.py | 104 + .../types/gpu_droplets/account/__init__.py | 11 + .../gpu_droplets/account/key_create_params.py | 22 + .../account/key_create_response.py | 39 + .../gpu_droplets/account/key_list_params.py | 15 + .../gpu_droplets/account/key_list_response.py | 46 + 
.../account/key_retrieve_response.py | 39 + .../gpu_droplets/account/key_update_params.py | 15 + .../account/key_update_response.py | 39 + .../action_bulk_initiate_params.py | 72 + .../action_bulk_initiate_response.py | 12 + .../gpu_droplets/action_initiate_params.py | 278 +++ .../gpu_droplets/action_initiate_response.py | 12 + .../types/gpu_droplets/action_list_params.py | 15 + .../gpu_droplets/action_list_response.py | 19 + .../gpu_droplets/action_retrieve_response.py | 12 + .../types/gpu_droplets/associated_resource.py | 21 + .../gpu_droplets/autoscale_create_params.py | 28 + .../gpu_droplets/autoscale_create_response.py | 12 + .../autoscale_list_history_params.py | 15 + .../autoscale_list_history_response.py | 48 + .../autoscale_list_members_params.py | 15 + .../autoscale_list_members_response.py | 47 + .../gpu_droplets/autoscale_list_params.py | 18 + .../gpu_droplets/autoscale_list_response.py | 19 + .../types/gpu_droplets/autoscale_pool.py | 54 + .../autoscale_pool_droplet_template.py | 69 + .../autoscale_pool_droplet_template_param.py | 84 + .../autoscale_pool_dynamic_config.py | 27 + .../autoscale_pool_dynamic_config_param.py | 27 + .../autoscale_pool_static_config.py | 10 + .../autoscale_pool_static_config_param.py | 12 + .../autoscale_retrieve_response.py | 12 + .../gpu_droplets/autoscale_update_params.py | 28 + .../gpu_droplets/autoscale_update_response.py | 12 + .../types/gpu_droplets/backup_list_params.py | 15 + .../backup_list_policies_params.py | 15 + .../backup_list_policies_response.py | 41 + .../gpu_droplets/backup_list_response.py | 53 + ...backup_list_supported_policies_response.py | 28 + .../backup_retrieve_policy_response.py | 30 + .../types/gpu_droplets/current_utilization.py | 15 + ...sociated_resource_check_status_response.py | 41 + ...ciated_resource_delete_selective_params.py | 34 + ..._with_associated_resource_list_response.py | 37 + .../destroyed_associated_resource.py | 28 + src/gradientai/types/gpu_droplets/domains.py | 22 + 
.../types/gpu_droplets/domains_param.py | 22 + src/gradientai/types/gpu_droplets/firewall.py | 98 + .../gpu_droplets/firewall_create_params.py | 17 + .../gpu_droplets/firewall_create_response.py | 12 + .../gpu_droplets/firewall_list_params.py | 15 + .../gpu_droplets/firewall_list_response.py | 19 + .../types/gpu_droplets/firewall_param.py | 67 + .../firewall_retrieve_response.py | 12 + .../gpu_droplets/firewall_update_params.py | 13 + .../gpu_droplets/firewall_update_response.py | 12 + .../types/gpu_droplets/firewalls/__init__.py | 10 + .../firewalls/droplet_add_params.py | 13 + .../firewalls/droplet_remove_params.py | 13 + .../gpu_droplets/firewalls/rule_add_params.py | 46 + .../firewalls/rule_remove_params.py | 46 + .../gpu_droplets/firewalls/tag_add_params.py | 18 + .../firewalls/tag_remove_params.py | 18 + .../types/gpu_droplets/floating_ip.py | 47 + .../gpu_droplets/floating_ip_create_params.py | 24 + .../floating_ip_create_response.py | 21 + .../gpu_droplets/floating_ip_list_params.py | 15 + .../gpu_droplets/floating_ip_list_response.py | 19 + .../floating_ip_retrieve_response.py | 12 + .../gpu_droplets/floating_ips/__init__.py | 8 + .../floating_ips/action_create_params.py | 24 + .../floating_ips/action_create_response.py | 17 + .../floating_ips/action_list_response.py | 19 + .../floating_ips/action_retrieve_response.py | 17 + .../types/gpu_droplets/forwarding_rule.py | 49 + .../gpu_droplets/forwarding_rule_param.py | 48 + .../types/gpu_droplets/glb_settings.py | 45 + .../types/gpu_droplets/glb_settings_param.py | 45 + .../types/gpu_droplets/health_check.py | 49 + .../types/gpu_droplets/health_check_param.py | 48 + .../types/gpu_droplets/image_create_params.py | 81 + .../gpu_droplets/image_create_response.py | 12 + .../types/gpu_droplets/image_list_params.py | 27 + .../types/gpu_droplets/image_list_response.py | 19 + .../gpu_droplets/image_retrieve_response.py | 10 + .../types/gpu_droplets/image_update_params.py | 42 + 
.../gpu_droplets/image_update_response.py | 10 + .../types/gpu_droplets/images/__init__.py | 6 + .../images/action_create_params.py | 45 + .../images/action_list_response.py | 19 + .../types/gpu_droplets/lb_firewall.py | 21 + .../types/gpu_droplets/lb_firewall_param.py | 22 + .../types/gpu_droplets/load_balancer.py | 185 ++ .../load_balancer_create_params.py | 335 +++ .../load_balancer_create_response.py | 12 + .../gpu_droplets/load_balancer_list_params.py | 15 + .../load_balancer_list_response.py | 19 + .../load_balancer_retrieve_response.py | 12 + .../load_balancer_update_params.py | 335 +++ .../load_balancer_update_response.py | 12 + .../gpu_droplets/load_balancers/__init__.py | 8 + .../load_balancers/droplet_add_params.py | 13 + .../load_balancers/droplet_remove_params.py | 13 + .../forwarding_rule_add_params.py | 14 + .../forwarding_rule_remove_params.py | 14 + .../types/gpu_droplets/size_list_params.py | 15 + .../types/gpu_droplets/size_list_response.py | 19 + .../gpu_droplets/snapshot_list_params.py | 18 + .../gpu_droplets/snapshot_list_response.py | 19 + .../snapshot_retrieve_response.py | 12 + .../types/gpu_droplets/sticky_sessions.py | 30 + .../gpu_droplets/sticky_sessions_param.py | 29 + .../gpu_droplets/volume_create_params.py | 153 ++ .../gpu_droplets/volume_create_response.py | 65 + .../volume_delete_by_name_params.py | 31 + .../types/gpu_droplets/volume_list_params.py | 37 + .../gpu_droplets/volume_list_response.py | 73 + .../gpu_droplets/volume_retrieve_response.py | 65 + .../types/gpu_droplets/volumes/__init__.py | 18 + .../volumes/action_initiate_by_id_params.py | 133 + .../volumes/action_initiate_by_id_response.py | 12 + .../volumes/action_initiate_by_name_params.py | 97 + .../action_initiate_by_name_response.py | 12 + .../volumes/action_list_params.py | 15 + .../volumes/action_list_response.py | 19 + .../volumes/action_retrieve_params.py | 17 + .../volumes/action_retrieve_response.py | 12 + .../volumes/snapshot_create_params.py | 21 + 
.../volumes/snapshot_create_response.py | 12 + .../volumes/snapshot_list_params.py | 15 + .../volumes/snapshot_list_response.py | 19 + .../volumes/snapshot_retrieve_response.py | 12 + .../gpu_droplets/volumes/volume_action.py | 18 + .../types/inference/api_key_create_params.py | 1 + .../inference/api_key_create_response.py | 1 + .../inference/api_key_delete_response.py | 1 + .../types/inference/api_key_list_params.py | 4 +- .../types/inference/api_key_list_response.py | 3 + .../types/inference/api_key_update_params.py | 2 + .../api_key_update_regenerate_response.py | 1 + .../inference/api_key_update_response.py | 1 + .../types/inference/api_model_api_key_info.py | 5 + .../types/knowledge_base_create_params.py | 6 + .../types/knowledge_base_create_response.py | 1 + .../types/knowledge_base_delete_response.py | 1 + .../types/knowledge_base_list_params.py | 4 +- .../types/knowledge_base_list_response.py | 3 + .../types/knowledge_base_retrieve_response.py | 1 + .../types/knowledge_base_update_params.py | 5 +- .../types/knowledge_base_update_response.py | 1 + .../api_file_upload_data_source.py | 3 + .../api_file_upload_data_source_param.py | 3 + .../api_indexed_data_source.py | 13 + .../types/knowledge_bases/api_indexing_job.py | 7 + .../api_knowledge_base_data_source.py | 12 + .../knowledge_bases/api_spaces_data_source.py | 2 + .../api_spaces_data_source_param.py | 2 + .../knowledge_bases/aws_data_source_param.py | 4 + .../data_source_create_params.py | 4 + .../data_source_create_response.py | 1 + .../data_source_delete_response.py | 2 + .../data_source_list_params.py | 4 +- .../data_source_list_response.py | 3 + .../indexing_job_create_params.py | 5 + .../indexing_job_create_response.py | 1 + .../indexing_job_list_params.py | 4 +- .../indexing_job_list_response.py | 3 + .../indexing_job_retrieve_response.py | 1 + .../indexing_job_update_cancel_response.py | 1 + src/gradientai/types/model_list_response.py | 28 +- .../types/model_retrieve_response.py | 21 + 
.../providers/anthropic_create_params.py | 2 + .../providers/anthropic_create_response.py | 1 + .../providers/anthropic_delete_response.py | 1 + .../providers/anthropic_list_agents_params.py | 4 +- .../anthropic_list_agents_response.py | 2 + .../models/providers/anthropic_list_params.py | 4 +- .../providers/anthropic_list_response.py | 3 + .../providers/anthropic_retrieve_response.py | 1 + .../providers/anthropic_update_params.py | 3 + .../providers/anthropic_update_response.py | 1 + .../models/providers/openai_create_params.py | 2 + .../providers/openai_create_response.py | 1 + .../providers/openai_delete_response.py | 1 + .../models/providers/openai_list_params.py | 4 +- .../models/providers/openai_list_response.py | 3 + .../openai_retrieve_agents_params.py | 4 +- .../openai_retrieve_agents_response.py | 2 + .../providers/openai_retrieve_response.py | 1 + .../models/providers/openai_update_params.py | 3 + .../providers/openai_update_response.py | 1 + src/gradientai/types/region_list_params.py | 8 +- src/gradientai/types/region_list_response.py | 22 +- src/gradientai/types/shared/__init__.py | 23 + src/gradientai/types/shared/action.py | 51 + src/gradientai/types/shared/action_link.py | 18 + src/gradientai/types/shared/api_links.py | 5 + src/gradientai/types/shared/api_meta.py | 3 + src/gradientai/types/shared/backward_links.py | 15 + .../types/shared/chat_completion_chunk.py | 55 +- .../types/shared/completion_usage.py | 16 + src/gradientai/types/shared/disk_info.py | 27 + src/gradientai/types/shared/droplet.py | 143 ++ .../shared/droplet_next_backup_window.py | 22 + .../types/shared/firewall_rule_target.py | 41 + src/gradientai/types/shared/forward_links.py | 15 + .../types/shared/garbage_collection.py | 43 + src/gradientai/types/shared/gpu_info.py | 25 + src/gradientai/types/shared/image.py | 131 + src/gradientai/types/shared/kernel.py | 25 + .../types/shared/meta_properties.py | 12 + src/gradientai/types/shared/network_v4.py | 26 + 
src/gradientai/types/shared/network_v6.py | 25 + src/gradientai/types/shared/page_links.py | 16 + src/gradientai/types/shared/region.py | 36 + src/gradientai/types/shared/size.py | 79 + src/gradientai/types/shared/snapshots.py | 47 + src/gradientai/types/shared/subscription.py | 19 + .../types/shared/subscription_tier_base.py | 44 + src/gradientai/types/shared/vpc_peering.py | 30 + .../types/shared_params/__init__.py | 3 + .../shared_params/firewall_rule_target.py | 42 + .../agents/chat/test_completions.py | 44 + .../agents/evaluation_metrics/test_models.py | 102 + .../evaluation_metrics/test_workspaces.py | 40 +- .../workspaces/test_agents.py | 42 +- tests/api_resources/agents/test_api_keys.py | 156 +- .../agents/test_evaluation_datasets.py | 20 +- .../agents/test_evaluation_metrics.py | 79 +- .../agents/test_evaluation_runs.py | 76 +- .../agents/test_evaluation_test_cases.py | 116 +- tests/api_resources/agents/test_functions.py | 132 +- .../agents/test_knowledge_bases.py | 64 +- tests/api_resources/agents/test_routes.py | 148 +- tests/api_resources/agents/test_versions.py | 40 +- tests/api_resources/chat/test_completions.py | 44 + tests/api_resources/gpu_droplets/__init__.py | 1 + .../gpu_droplets/account/__init__.py | 1 + .../gpu_droplets/account/test_keys.py | 399 +++ .../gpu_droplets/firewalls/__init__.py | 1 + .../gpu_droplets/firewalls/test_droplets.py | 206 ++ .../gpu_droplets/firewalls/test_rules.py | 326 +++ .../gpu_droplets/firewalls/test_tags.py | 206 ++ .../gpu_droplets/floating_ips/__init__.py | 1 + .../gpu_droplets/floating_ips/test_actions.py | 396 +++ .../gpu_droplets/images/__init__.py | 1 + .../gpu_droplets/images/test_actions.py | 321 +++ .../gpu_droplets/load_balancers/__init__.py | 1 + .../load_balancers/test_droplets.py | 206 ++ .../load_balancers/test_forwarding_rules.py | 318 +++ .../gpu_droplets/test_actions.py | 1209 +++++++++ .../gpu_droplets/test_autoscale.py | 953 +++++++ .../gpu_droplets/test_backups.py | 315 +++ 
.../test_destroy_with_associated_resources.py | 431 ++++ .../gpu_droplets/test_firewalls.py | 617 +++++ .../gpu_droplets/test_floating_ips.py | 424 ++++ .../api_resources/gpu_droplets/test_images.py | 417 ++++ .../gpu_droplets/test_load_balancers.py | 1443 +++++++++++ .../api_resources/gpu_droplets/test_sizes.py | 98 + .../gpu_droplets/test_snapshots.py | 236 ++ .../gpu_droplets/test_volumes.py | 568 +++++ .../gpu_droplets/volumes/__init__.py | 1 + .../gpu_droplets/volumes/test_actions.py | 825 ++++++ .../gpu_droplets/volumes/test_snapshots.py | 412 +++ .../api_resources/inference/test_api_keys.py | 28 +- .../knowledge_bases/test_data_sources.py | 104 +- .../knowledge_bases/test_indexing_jobs.py | 28 +- .../models/providers/test_anthropic.py | 52 +- .../models/providers/test_openai.py | 52 +- tests/api_resources/test_agents.py | 130 +- tests/api_resources/test_gpu_droplets.py | 912 +++++++ tests/api_resources/test_knowledge_bases.py | 128 +- tests/api_resources/test_models.py | 100 +- tests/api_resources/test_regions.py | 8 +- 420 files changed, 39504 insertions(+), 1005 deletions(-) create mode 100644 src/gradientai/resources/agents/evaluation_metrics/models.py create mode 100644 src/gradientai/resources/gpu_droplets/__init__.py create mode 100644 src/gradientai/resources/gpu_droplets/account/__init__.py create mode 100644 src/gradientai/resources/gpu_droplets/account/account.py create mode 100644 src/gradientai/resources/gpu_droplets/account/keys.py create mode 100644 src/gradientai/resources/gpu_droplets/actions.py create mode 100644 src/gradientai/resources/gpu_droplets/autoscale.py create mode 100644 src/gradientai/resources/gpu_droplets/backups.py create mode 100644 src/gradientai/resources/gpu_droplets/destroy_with_associated_resources.py create mode 100644 src/gradientai/resources/gpu_droplets/firewalls/__init__.py create mode 100644 src/gradientai/resources/gpu_droplets/firewalls/droplets.py create mode 100644 
src/gradientai/resources/gpu_droplets/firewalls/firewalls.py create mode 100644 src/gradientai/resources/gpu_droplets/firewalls/rules.py create mode 100644 src/gradientai/resources/gpu_droplets/firewalls/tags.py create mode 100644 src/gradientai/resources/gpu_droplets/floating_ips/__init__.py create mode 100644 src/gradientai/resources/gpu_droplets/floating_ips/actions.py create mode 100644 src/gradientai/resources/gpu_droplets/floating_ips/floating_ips.py create mode 100644 src/gradientai/resources/gpu_droplets/gpu_droplets.py create mode 100644 src/gradientai/resources/gpu_droplets/images/__init__.py create mode 100644 src/gradientai/resources/gpu_droplets/images/actions.py create mode 100644 src/gradientai/resources/gpu_droplets/images/images.py create mode 100644 src/gradientai/resources/gpu_droplets/load_balancers/__init__.py create mode 100644 src/gradientai/resources/gpu_droplets/load_balancers/droplets.py create mode 100644 src/gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py create mode 100644 src/gradientai/resources/gpu_droplets/load_balancers/load_balancers.py create mode 100644 src/gradientai/resources/gpu_droplets/sizes.py create mode 100644 src/gradientai/resources/gpu_droplets/snapshots.py create mode 100644 src/gradientai/resources/gpu_droplets/volumes/__init__.py create mode 100644 src/gradientai/resources/gpu_droplets/volumes/actions.py create mode 100644 src/gradientai/resources/gpu_droplets/volumes/snapshots.py create mode 100644 src/gradientai/resources/gpu_droplets/volumes/volumes.py create mode 100644 src/gradientai/types/agents/evaluation_metric_list_regions_params.py create mode 100644 src/gradientai/types/agents/evaluation_metric_list_regions_response.py rename src/gradientai/types/{ => agents/evaluation_metrics}/model_list_params.py (87%) create mode 100644 src/gradientai/types/agents/evaluation_metrics/model_list_response.py create mode 100644 src/gradientai/types/agents/evaluation_run_list_results_params.py create 
mode 100644 src/gradientai/types/droplet_backup_policy.py create mode 100644 src/gradientai/types/droplet_backup_policy_param.py create mode 100644 src/gradientai/types/gpu_droplet_create_params.py create mode 100644 src/gradientai/types/gpu_droplet_create_response.py create mode 100644 src/gradientai/types/gpu_droplet_delete_by_tag_params.py create mode 100644 src/gradientai/types/gpu_droplet_list_firewalls_params.py create mode 100644 src/gradientai/types/gpu_droplet_list_firewalls_response.py create mode 100644 src/gradientai/types/gpu_droplet_list_kernels_params.py create mode 100644 src/gradientai/types/gpu_droplet_list_kernels_response.py create mode 100644 src/gradientai/types/gpu_droplet_list_neighbors_response.py create mode 100644 src/gradientai/types/gpu_droplet_list_params.py create mode 100644 src/gradientai/types/gpu_droplet_list_response.py create mode 100644 src/gradientai/types/gpu_droplet_list_snapshots_params.py create mode 100644 src/gradientai/types/gpu_droplet_list_snapshots_response.py create mode 100644 src/gradientai/types/gpu_droplet_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/__init__.py create mode 100644 src/gradientai/types/gpu_droplets/account/__init__.py create mode 100644 src/gradientai/types/gpu_droplets/account/key_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/account/key_create_response.py create mode 100644 src/gradientai/types/gpu_droplets/account/key_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/account/key_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/account/key_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/account/key_update_params.py create mode 100644 src/gradientai/types/gpu_droplets/account/key_update_response.py create mode 100644 src/gradientai/types/gpu_droplets/action_bulk_initiate_params.py create mode 100644 src/gradientai/types/gpu_droplets/action_bulk_initiate_response.py create mode 
100644 src/gradientai/types/gpu_droplets/action_initiate_params.py create mode 100644 src/gradientai/types/gpu_droplets/action_initiate_response.py create mode 100644 src/gradientai/types/gpu_droplets/action_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/action_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/action_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/associated_resource.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_create_response.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_list_history_params.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_list_history_response.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_list_members_params.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_list_members_response.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_pool.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_pool_static_config.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_update_params.py create mode 100644 src/gradientai/types/gpu_droplets/autoscale_update_response.py create mode 100644 
src/gradientai/types/gpu_droplets/backup_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/backup_list_policies_params.py create mode 100644 src/gradientai/types/gpu_droplets/backup_list_policies_response.py create mode 100644 src/gradientai/types/gpu_droplets/backup_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/backup_list_supported_policies_response.py create mode 100644 src/gradientai/types/gpu_droplets/backup_retrieve_policy_response.py create mode 100644 src/gradientai/types/gpu_droplets/current_utilization.py create mode 100644 src/gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py create mode 100644 src/gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py create mode 100644 src/gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/destroyed_associated_resource.py create mode 100644 src/gradientai/types/gpu_droplets/domains.py create mode 100644 src/gradientai/types/gpu_droplets/domains_param.py create mode 100644 src/gradientai/types/gpu_droplets/firewall.py create mode 100644 src/gradientai/types/gpu_droplets/firewall_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/firewall_create_response.py create mode 100644 src/gradientai/types/gpu_droplets/firewall_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/firewall_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/firewall_param.py create mode 100644 src/gradientai/types/gpu_droplets/firewall_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/firewall_update_params.py create mode 100644 src/gradientai/types/gpu_droplets/firewall_update_response.py create mode 100644 src/gradientai/types/gpu_droplets/firewalls/__init__.py create mode 100644 src/gradientai/types/gpu_droplets/firewalls/droplet_add_params.py create mode 100644 
src/gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py create mode 100644 src/gradientai/types/gpu_droplets/firewalls/rule_add_params.py create mode 100644 src/gradientai/types/gpu_droplets/firewalls/rule_remove_params.py create mode 100644 src/gradientai/types/gpu_droplets/firewalls/tag_add_params.py create mode 100644 src/gradientai/types/gpu_droplets/firewalls/tag_remove_params.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ip.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ip_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ip_create_response.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ip_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ip_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ip_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ips/__init__.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ips/action_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ips/action_create_response.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ips/action_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/forwarding_rule.py create mode 100644 src/gradientai/types/gpu_droplets/forwarding_rule_param.py create mode 100644 src/gradientai/types/gpu_droplets/glb_settings.py create mode 100644 src/gradientai/types/gpu_droplets/glb_settings_param.py create mode 100644 src/gradientai/types/gpu_droplets/health_check.py create mode 100644 src/gradientai/types/gpu_droplets/health_check_param.py create mode 100644 src/gradientai/types/gpu_droplets/image_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/image_create_response.py create mode 100644 src/gradientai/types/gpu_droplets/image_list_params.py create 
mode 100644 src/gradientai/types/gpu_droplets/image_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/image_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/image_update_params.py create mode 100644 src/gradientai/types/gpu_droplets/image_update_response.py create mode 100644 src/gradientai/types/gpu_droplets/images/__init__.py create mode 100644 src/gradientai/types/gpu_droplets/images/action_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/images/action_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/lb_firewall.py create mode 100644 src/gradientai/types/gpu_droplets/lb_firewall_param.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancer.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancer_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancer_create_response.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancer_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancer_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancer_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancer_update_params.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancer_update_response.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancers/__init__.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py create mode 100644 src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py create mode 100644 src/gradientai/types/gpu_droplets/size_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/size_list_response.py create mode 100644 
src/gradientai/types/gpu_droplets/snapshot_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/snapshot_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/snapshot_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/sticky_sessions.py create mode 100644 src/gradientai/types/gpu_droplets/sticky_sessions_param.py create mode 100644 src/gradientai/types/gpu_droplets/volume_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/volume_create_response.py create mode 100644 src/gradientai/types/gpu_droplets/volume_delete_by_name_params.py create mode 100644 src/gradientai/types/gpu_droplets/volume_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/volume_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/volume_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/__init__.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/action_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/action_list_response.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/action_retrieve_params.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/action_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/snapshot_create_params.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/snapshot_create_response.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/snapshot_list_params.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/snapshot_list_response.py create 
mode 100644 src/gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py create mode 100644 src/gradientai/types/gpu_droplets/volumes/volume_action.py create mode 100644 src/gradientai/types/model_retrieve_response.py create mode 100644 src/gradientai/types/shared/action.py create mode 100644 src/gradientai/types/shared/action_link.py create mode 100644 src/gradientai/types/shared/backward_links.py create mode 100644 src/gradientai/types/shared/completion_usage.py create mode 100644 src/gradientai/types/shared/disk_info.py create mode 100644 src/gradientai/types/shared/droplet.py create mode 100644 src/gradientai/types/shared/droplet_next_backup_window.py create mode 100644 src/gradientai/types/shared/firewall_rule_target.py create mode 100644 src/gradientai/types/shared/forward_links.py create mode 100644 src/gradientai/types/shared/garbage_collection.py create mode 100644 src/gradientai/types/shared/gpu_info.py create mode 100644 src/gradientai/types/shared/image.py create mode 100644 src/gradientai/types/shared/kernel.py create mode 100644 src/gradientai/types/shared/meta_properties.py create mode 100644 src/gradientai/types/shared/network_v4.py create mode 100644 src/gradientai/types/shared/network_v6.py create mode 100644 src/gradientai/types/shared/page_links.py create mode 100644 src/gradientai/types/shared/region.py create mode 100644 src/gradientai/types/shared/size.py create mode 100644 src/gradientai/types/shared/snapshots.py create mode 100644 src/gradientai/types/shared/subscription.py create mode 100644 src/gradientai/types/shared/subscription_tier_base.py create mode 100644 src/gradientai/types/shared/vpc_peering.py create mode 100644 src/gradientai/types/shared_params/__init__.py create mode 100644 src/gradientai/types/shared_params/firewall_rule_target.py create mode 100644 tests/api_resources/agents/evaluation_metrics/test_models.py create mode 100644 tests/api_resources/gpu_droplets/__init__.py create mode 100644 
tests/api_resources/gpu_droplets/account/__init__.py create mode 100644 tests/api_resources/gpu_droplets/account/test_keys.py create mode 100644 tests/api_resources/gpu_droplets/firewalls/__init__.py create mode 100644 tests/api_resources/gpu_droplets/firewalls/test_droplets.py create mode 100644 tests/api_resources/gpu_droplets/firewalls/test_rules.py create mode 100644 tests/api_resources/gpu_droplets/firewalls/test_tags.py create mode 100644 tests/api_resources/gpu_droplets/floating_ips/__init__.py create mode 100644 tests/api_resources/gpu_droplets/floating_ips/test_actions.py create mode 100644 tests/api_resources/gpu_droplets/images/__init__.py create mode 100644 tests/api_resources/gpu_droplets/images/test_actions.py create mode 100644 tests/api_resources/gpu_droplets/load_balancers/__init__.py create mode 100644 tests/api_resources/gpu_droplets/load_balancers/test_droplets.py create mode 100644 tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py create mode 100644 tests/api_resources/gpu_droplets/test_actions.py create mode 100644 tests/api_resources/gpu_droplets/test_autoscale.py create mode 100644 tests/api_resources/gpu_droplets/test_backups.py create mode 100644 tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py create mode 100644 tests/api_resources/gpu_droplets/test_firewalls.py create mode 100644 tests/api_resources/gpu_droplets/test_floating_ips.py create mode 100644 tests/api_resources/gpu_droplets/test_images.py create mode 100644 tests/api_resources/gpu_droplets/test_load_balancers.py create mode 100644 tests/api_resources/gpu_droplets/test_sizes.py create mode 100644 tests/api_resources/gpu_droplets/test_snapshots.py create mode 100644 tests/api_resources/gpu_droplets/test_volumes.py create mode 100644 tests/api_resources/gpu_droplets/volumes/__init__.py create mode 100644 tests/api_resources/gpu_droplets/volumes/test_actions.py create mode 100644 
tests/api_resources/gpu_droplets/volumes/test_snapshots.py create mode 100644 tests/api_resources/test_gpu_droplets.py diff --git a/.stats.yml b/.stats.yml index 89f80bc1..5f9d16dd 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 77 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-391afaae764eb758523b67805cb47ae3bc319dc119d83414afdd66f123ceaf5c.yml -openapi_spec_hash: c773d792724f5647ae25a5ae4ccec208 -config_hash: 0bd094d86a010f7cbd5eb22ef548a29f +configured_endpoints: 168 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-f8e8c290636c1e218efcf7bfe92ba7570c11690754d21287d838919fbc943a80.yml +openapi_spec_hash: 1eddf488ecbe415efb45445697716f5d +config_hash: 683ea6ba4d63037c1c72484e5936e73c diff --git a/api.md b/api.md index c6acd4ec..8682940b 100644 --- a/api.md +++ b/api.md @@ -1,7 +1,35 @@ # Shared Types ```python -from gradientai.types import APILinks, APIMeta, ChatCompletionChunk, ChatCompletionTokenLogprob +from gradientai.types import ( + Action, + ActionLink, + APILinks, + APIMeta, + BackwardLinks, + ChatCompletionChunk, + ChatCompletionTokenLogprob, + CompletionUsage, + DiskInfo, + Droplet, + DropletNextBackupWindow, + FirewallRuleTarget, + ForwardLinks, + GarbageCollection, + GPUInfo, + Image, + Kernel, + MetaProperties, + NetworkV4, + NetworkV6, + PageLinks, + Region, + Size, + Snapshots, + Subscription, + SubscriptionTierBase, + VpcPeering, +) ``` # Agents @@ -77,12 +105,16 @@ Methods: Types: ```python -from gradientai.types.agents import EvaluationMetricListResponse +from gradientai.types.agents import ( + EvaluationMetricListResponse, + EvaluationMetricListRegionsResponse, +) ``` Methods: - client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse +- client.agents.evaluation_metrics.list_regions(\*\*params) -> EvaluationMetricListRegionsResponse ### Workspaces @@ -124,6 +156,18 @@ Methods: - 
client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse - client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse +### Models + +Types: + +```python +from gradientai.types.agents.evaluation_metrics import ModelListResponse +``` + +Methods: + +- client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse + ## EvaluationRuns Types: @@ -145,7 +189,7 @@ Methods: - client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse - client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse -- client.agents.evaluation_runs.list_results(evaluation_run_uuid) -> EvaluationRunListResultsResponse +- client.agents.evaluation_runs.list_results(evaluation_run_uuid, \*\*params) -> EvaluationRunListResultsResponse - client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse ## EvaluationTestCases @@ -168,7 +212,7 @@ Methods: - client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse - client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse -- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse +- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse - client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse - client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse @@ -277,7 +321,7 @@ from gradientai.types import RegionListResponse Methods: -- client.regions.list(\*\*params) -> RegionListResponse +- client.regions.list(\*\*params) -> RegionListResponse # KnowledgeBases @@ -379,12 +423,19 @@ Methods: Types: ```python -from gradientai.types 
import APIAgreement, APIModel, APIModelVersion, ModelListResponse +from gradientai.types import ( + APIAgreement, + APIModel, + APIModelVersion, + ModelRetrieveResponse, + ModelListResponse, +) ``` Methods: -- client.models.list(\*\*params) -> ModelListResponse +- client.models.retrieve(model) -> ModelRetrieveResponse +- client.models.list() -> ModelListResponse ## Providers @@ -435,3 +486,392 @@ Methods: - client.models.providers.openai.list(\*\*params) -> OpenAIListResponse - client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse - client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse + +# GPUDroplets + +Types: + +```python +from gradientai.types import ( + DropletBackupPolicy, + GPUDropletCreateResponse, + GPUDropletRetrieveResponse, + GPUDropletListResponse, + GPUDropletListFirewallsResponse, + GPUDropletListKernelsResponse, + GPUDropletListNeighborsResponse, + GPUDropletListSnapshotsResponse, +) +``` + +Methods: + +- client.gpu_droplets.create(\*\*params) -> GPUDropletCreateResponse +- client.gpu_droplets.retrieve(droplet_id) -> GPUDropletRetrieveResponse +- client.gpu_droplets.list(\*\*params) -> GPUDropletListResponse +- client.gpu_droplets.delete(droplet_id) -> None +- client.gpu_droplets.delete_by_tag(\*\*params) -> None +- client.gpu_droplets.list_firewalls(droplet_id, \*\*params) -> GPUDropletListFirewallsResponse +- client.gpu_droplets.list_kernels(droplet_id, \*\*params) -> GPUDropletListKernelsResponse +- client.gpu_droplets.list_neighbors(droplet_id) -> GPUDropletListNeighborsResponse +- client.gpu_droplets.list_snapshots(droplet_id, \*\*params) -> GPUDropletListSnapshotsResponse + +## Backups + +Types: + +```python +from gradientai.types.gpu_droplets import ( + BackupListResponse, + BackupListPoliciesResponse, + BackupListSupportedPoliciesResponse, + BackupRetrievePolicyResponse, +) +``` + +Methods: + +- client.gpu_droplets.backups.list(droplet_id, \*\*params) -> BackupListResponse +- 
client.gpu_droplets.backups.list_policies(\*\*params) -> BackupListPoliciesResponse +- client.gpu_droplets.backups.list_supported_policies() -> BackupListSupportedPoliciesResponse +- client.gpu_droplets.backups.retrieve_policy(droplet_id) -> BackupRetrievePolicyResponse + +## Actions + +Types: + +```python +from gradientai.types.gpu_droplets import ( + ActionRetrieveResponse, + ActionListResponse, + ActionBulkInitiateResponse, + ActionInitiateResponse, +) +``` + +Methods: + +- client.gpu_droplets.actions.retrieve(action_id, \*, droplet_id) -> ActionRetrieveResponse +- client.gpu_droplets.actions.list(droplet_id, \*\*params) -> ActionListResponse +- client.gpu_droplets.actions.bulk_initiate(\*\*params) -> ActionBulkInitiateResponse +- client.gpu_droplets.actions.initiate(droplet_id, \*\*params) -> ActionInitiateResponse + +## DestroyWithAssociatedResources + +Types: + +```python +from gradientai.types.gpu_droplets import ( + AssociatedResource, + DestroyedAssociatedResource, + DestroyWithAssociatedResourceListResponse, + DestroyWithAssociatedResourceCheckStatusResponse, +) +``` + +Methods: + +- client.gpu_droplets.destroy_with_associated_resources.list(droplet_id) -> DestroyWithAssociatedResourceListResponse +- client.gpu_droplets.destroy_with_associated_resources.check_status(droplet_id) -> DestroyWithAssociatedResourceCheckStatusResponse +- client.gpu_droplets.destroy_with_associated_resources.delete_dangerous(droplet_id) -> None +- client.gpu_droplets.destroy_with_associated_resources.delete_selective(droplet_id, \*\*params) -> None +- client.gpu_droplets.destroy_with_associated_resources.retry(droplet_id) -> None + +## Autoscale + +Types: + +```python +from gradientai.types.gpu_droplets import ( + AutoscalePool, + AutoscalePoolDropletTemplate, + AutoscalePoolDynamicConfig, + AutoscalePoolStaticConfig, + CurrentUtilization, + AutoscaleCreateResponse, + AutoscaleRetrieveResponse, + AutoscaleUpdateResponse, + AutoscaleListResponse, + AutoscaleListHistoryResponse, + 
AutoscaleListMembersResponse, +) +``` + +Methods: + +- client.gpu_droplets.autoscale.create(\*\*params) -> AutoscaleCreateResponse +- client.gpu_droplets.autoscale.retrieve(autoscale_pool_id) -> AutoscaleRetrieveResponse +- client.gpu_droplets.autoscale.update(autoscale_pool_id, \*\*params) -> AutoscaleUpdateResponse +- client.gpu_droplets.autoscale.list(\*\*params) -> AutoscaleListResponse +- client.gpu_droplets.autoscale.delete(autoscale_pool_id) -> None +- client.gpu_droplets.autoscale.delete_dangerous(autoscale_pool_id) -> None +- client.gpu_droplets.autoscale.list_history(autoscale_pool_id, \*\*params) -> AutoscaleListHistoryResponse +- client.gpu_droplets.autoscale.list_members(autoscale_pool_id, \*\*params) -> AutoscaleListMembersResponse + +## Firewalls + +Types: + +```python +from gradientai.types.gpu_droplets import ( + Firewall, + FirewallCreateResponse, + FirewallRetrieveResponse, + FirewallUpdateResponse, + FirewallListResponse, +) +``` + +Methods: + +- client.gpu_droplets.firewalls.create(\*\*params) -> FirewallCreateResponse +- client.gpu_droplets.firewalls.retrieve(firewall_id) -> FirewallRetrieveResponse +- client.gpu_droplets.firewalls.update(firewall_id, \*\*params) -> FirewallUpdateResponse +- client.gpu_droplets.firewalls.list(\*\*params) -> FirewallListResponse +- client.gpu_droplets.firewalls.delete(firewall_id) -> None + +### Droplets + +Methods: + +- client.gpu_droplets.firewalls.droplets.add(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.droplets.remove(firewall_id, \*\*params) -> None + +### Tags + +Methods: + +- client.gpu_droplets.firewalls.tags.add(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.tags.remove(firewall_id, \*\*params) -> None + +### Rules + +Methods: + +- client.gpu_droplets.firewalls.rules.add(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.rules.remove(firewall_id, \*\*params) -> None + +## FloatingIPs + +Types: + +```python +from gradientai.types.gpu_droplets 
import ( + FloatingIP, + FloatingIPCreateResponse, + FloatingIPRetrieveResponse, + FloatingIPListResponse, +) +``` + +Methods: + +- client.gpu_droplets.floating_ips.create(\*\*params) -> FloatingIPCreateResponse +- client.gpu_droplets.floating_ips.retrieve(floating_ip) -> FloatingIPRetrieveResponse +- client.gpu_droplets.floating_ips.list(\*\*params) -> FloatingIPListResponse +- client.gpu_droplets.floating_ips.delete(floating_ip) -> None + +### Actions + +Types: + +```python +from gradientai.types.gpu_droplets.floating_ips import ( + ActionCreateResponse, + ActionRetrieveResponse, + ActionListResponse, +) +``` + +Methods: + +- client.gpu_droplets.floating_ips.actions.create(floating_ip, \*\*params) -> ActionCreateResponse +- client.gpu_droplets.floating_ips.actions.retrieve(action_id, \*, floating_ip) -> ActionRetrieveResponse +- client.gpu_droplets.floating_ips.actions.list(floating_ip) -> ActionListResponse + +## Images + +Types: + +```python +from gradientai.types.gpu_droplets import ( + ImageCreateResponse, + ImageRetrieveResponse, + ImageUpdateResponse, + ImageListResponse, +) +``` + +Methods: + +- client.gpu_droplets.images.create(\*\*params) -> ImageCreateResponse +- client.gpu_droplets.images.retrieve(image_id) -> ImageRetrieveResponse +- client.gpu_droplets.images.update(image_id, \*\*params) -> ImageUpdateResponse +- client.gpu_droplets.images.list(\*\*params) -> ImageListResponse +- client.gpu_droplets.images.delete(image_id) -> None + +### Actions + +Types: + +```python +from gradientai.types.gpu_droplets.images import ActionListResponse +``` + +Methods: + +- client.gpu_droplets.images.actions.create(image_id, \*\*params) -> Action +- client.gpu_droplets.images.actions.retrieve(action_id, \*, image_id) -> Action +- client.gpu_droplets.images.actions.list(image_id) -> ActionListResponse + +## LoadBalancers + +Types: + +```python +from gradientai.types.gpu_droplets import ( + Domains, + ForwardingRule, + GlbSettings, + HealthCheck, + LbFirewall, + 
LoadBalancer, + StickySessions, + LoadBalancerCreateResponse, + LoadBalancerRetrieveResponse, + LoadBalancerUpdateResponse, + LoadBalancerListResponse, +) +``` + +Methods: + +- client.gpu_droplets.load_balancers.create(\*\*params) -> LoadBalancerCreateResponse +- client.gpu_droplets.load_balancers.retrieve(lb_id) -> LoadBalancerRetrieveResponse +- client.gpu_droplets.load_balancers.update(lb_id, \*\*params) -> LoadBalancerUpdateResponse +- client.gpu_droplets.load_balancers.list(\*\*params) -> LoadBalancerListResponse +- client.gpu_droplets.load_balancers.delete(lb_id) -> None +- client.gpu_droplets.load_balancers.delete_cache(lb_id) -> None + +### Droplets + +Methods: + +- client.gpu_droplets.load_balancers.droplets.add(lb_id, \*\*params) -> None +- client.gpu_droplets.load_balancers.droplets.remove(lb_id, \*\*params) -> None + +### ForwardingRules + +Methods: + +- client.gpu_droplets.load_balancers.forwarding_rules.add(lb_id, \*\*params) -> None +- client.gpu_droplets.load_balancers.forwarding_rules.remove(lb_id, \*\*params) -> None + +## Sizes + +Types: + +```python +from gradientai.types.gpu_droplets import SizeListResponse +``` + +Methods: + +- client.gpu_droplets.sizes.list(\*\*params) -> SizeListResponse + +## Snapshots + +Types: + +```python +from gradientai.types.gpu_droplets import SnapshotRetrieveResponse, SnapshotListResponse +``` + +Methods: + +- client.gpu_droplets.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse +- client.gpu_droplets.snapshots.list(\*\*params) -> SnapshotListResponse +- client.gpu_droplets.snapshots.delete(snapshot_id) -> None + +## Volumes + +Types: + +```python +from gradientai.types.gpu_droplets import ( + VolumeCreateResponse, + VolumeRetrieveResponse, + VolumeListResponse, +) +``` + +Methods: + +- client.gpu_droplets.volumes.create(\*\*params) -> VolumeCreateResponse +- client.gpu_droplets.volumes.retrieve(volume_id) -> VolumeRetrieveResponse +- client.gpu_droplets.volumes.list(\*\*params) -> VolumeListResponse +- 
client.gpu_droplets.volumes.delete(volume_id) -> None +- client.gpu_droplets.volumes.delete_by_name(\*\*params) -> None + +### Actions + +Types: + +```python +from gradientai.types.gpu_droplets.volumes import ( + VolumeAction, + ActionRetrieveResponse, + ActionListResponse, + ActionInitiateByIDResponse, + ActionInitiateByNameResponse, +) +``` + +Methods: + +- client.gpu_droplets.volumes.actions.retrieve(action_id, \*, volume_id, \*\*params) -> ActionRetrieveResponse +- client.gpu_droplets.volumes.actions.list(volume_id, \*\*params) -> ActionListResponse +- client.gpu_droplets.volumes.actions.initiate_by_id(volume_id, \*\*params) -> ActionInitiateByIDResponse +- client.gpu_droplets.volumes.actions.initiate_by_name(\*\*params) -> ActionInitiateByNameResponse + +### Snapshots + +Types: + +```python +from gradientai.types.gpu_droplets.volumes import ( + SnapshotCreateResponse, + SnapshotRetrieveResponse, + SnapshotListResponse, +) +``` + +Methods: + +- client.gpu_droplets.volumes.snapshots.create(volume_id, \*\*params) -> SnapshotCreateResponse +- client.gpu_droplets.volumes.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse +- client.gpu_droplets.volumes.snapshots.list(volume_id, \*\*params) -> SnapshotListResponse +- client.gpu_droplets.volumes.snapshots.delete(snapshot_id) -> None + +## Account + +### Keys + +Types: + +```python +from gradientai.types.gpu_droplets.account import ( + KeyCreateResponse, + KeyRetrieveResponse, + KeyUpdateResponse, + KeyListResponse, +) +``` + +Methods: + +- client.gpu_droplets.account.keys.create(\*\*params) -> KeyCreateResponse +- client.gpu_droplets.account.keys.retrieve(ssh_key_identifier) -> KeyRetrieveResponse +- client.gpu_droplets.account.keys.update(ssh_key_identifier, \*\*params) -> KeyUpdateResponse +- client.gpu_droplets.account.keys.list(\*\*params) -> KeyListResponse +- client.gpu_droplets.account.keys.delete(ssh_key_identifier) -> None diff --git a/src/gradientai/resources/__init__.py 
b/src/gradientai/resources/__init__.py index e1ed4a00..fd6da608 100644 --- a/src/gradientai/resources/__init__.py +++ b/src/gradientai/resources/__init__.py @@ -40,6 +40,14 @@ InferenceResourceWithStreamingResponse, AsyncInferenceResourceWithStreamingResponse, ) +from .gpu_droplets import ( + GPUDropletsResource, + AsyncGPUDropletsResource, + GPUDropletsResourceWithRawResponse, + AsyncGPUDropletsResourceWithRawResponse, + GPUDropletsResourceWithStreamingResponse, + AsyncGPUDropletsResourceWithStreamingResponse, +) from .knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, @@ -86,4 +94,10 @@ "AsyncModelsResourceWithRawResponse", "ModelsResourceWithStreamingResponse", "AsyncModelsResourceWithStreamingResponse", + "GPUDropletsResource", + "AsyncGPUDropletsResource", + "GPUDropletsResourceWithRawResponse", + "AsyncGPUDropletsResourceWithRawResponse", + "GPUDropletsResourceWithStreamingResponse", + "AsyncGPUDropletsResourceWithStreamingResponse", ] diff --git a/src/gradientai/resources/agents/agents.py b/src/gradientai/resources/agents/agents.py index 200e9fc0..92d696ba 100644 --- a/src/gradientai/resources/agents/agents.py +++ b/src/gradientai/resources/agents/agents.py @@ -203,13 +203,29 @@ def create( body contains a JSON object with the newly created agent object. Args: + anthropic_key_uuid: Optional Anthropic API key ID to use with Anthropic models + + description: A text description of the agent, not used in inference + instruction: Agent instruction. Instructions help your agent to perform its job effectively. See [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) for best practices. + knowledge_base_uuid: Ids of the knowledge base(s) to attach to the agent + model_uuid: Identifier for the foundation model. 
+ name: Agent name + + openai_key_uuid: Optional OpenAI API key ID to use with OpenAI models + + project_id: The id of the DigitalOcean project this agent will belong to + + region: The DigitalOcean region to deploy your agent in + + tags: Agent tag to organize related resources + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -285,6 +301,7 @@ def update( path_uuid: str, *, anthropic_key_uuid: str | NotGiven = NOT_GIVEN, + conversation_logs_enabled: bool | NotGiven = NOT_GIVEN, description: str | NotGiven = NOT_GIVEN, instruction: str | NotGiven = NOT_GIVEN, k: int | NotGiven = NOT_GIVEN, @@ -312,17 +329,39 @@ def update( response body is a JSON object containing the agent. Args: + anthropic_key_uuid: Optional anthropic key uuid for use with anthropic models + + conversation_logs_enabled: Optional update of conversation logs enabled + + description: Agent description + instruction: Agent instruction. Instructions help your agent to perform its job effectively. See [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) for best practices. + k: How many results should be considered from an attached knowledge base + max_tokens: Specifies the maximum number of tokens the model can process in a single input or output, set as a number between 1 and 512. This determines the length of each response. model_uuid: Identifier for the foundation model. 
+ name: Agent name + + openai_key_uuid: Optional OpenAI key uuid for use with OpenAI models + + project_id: The id of the DigitalOcean project this agent will belong to + + retrieval_method: - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown + - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite + - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back + - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries + - RETRIEVAL_METHOD_NONE: The retrieval method is none + + tags: A set of abitrary tags to organize your agent + temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower values produce more predictable and conservative responses, while higher values encourage creativity and variation. @@ -331,6 +370,8 @@ def update( number between 0 and 1. Higher values allow for more diverse outputs, while lower values ensure focused and coherent responses. + body_uuid: Unique agent id + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -348,6 +389,7 @@ def update( body=maybe_transform( { "anthropic_key_uuid": anthropic_key_uuid, + "conversation_logs_enabled": conversation_logs_enabled, "description": description, "instruction": instruction, "k": k, @@ -388,11 +430,11 @@ def list( To list all agents, send a GET request to `/v2/gen-ai/agents`. Args: - only_deployed: only list agents that are deployed. + only_deployed: Only list agents that are deployed. - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -477,6 +519,17 @@ def update_status( PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`. 
Args: + body_uuid: Unique id + + visibility: - VISIBILITY_UNKNOWN: The status of the deployment is unknown + - VISIBILITY_DISABLED: The deployment is disabled and will no longer service + requests + - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state + - VISIBILITY_PUBLIC: The deployment is public and will service requests from the + public internet + - VISIBILITY_PRIVATE: The deployment is private and will only service requests + from other agents, or through API keys + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -591,13 +644,29 @@ async def create( body contains a JSON object with the newly created agent object. Args: + anthropic_key_uuid: Optional Anthropic API key ID to use with Anthropic models + + description: A text description of the agent, not used in inference + instruction: Agent instruction. Instructions help your agent to perform its job effectively. See [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) for best practices. + knowledge_base_uuid: Ids of the knowledge base(s) to attach to the agent + model_uuid: Identifier for the foundation model. + name: Agent name + + openai_key_uuid: Optional OpenAI API key ID to use with OpenAI models + + project_id: The id of the DigitalOcean project this agent will belong to + + region: The DigitalOcean region to deploy your agent in + + tags: Agent tag to organize related resources + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -673,6 +742,7 @@ async def update( path_uuid: str, *, anthropic_key_uuid: str | NotGiven = NOT_GIVEN, + conversation_logs_enabled: bool | NotGiven = NOT_GIVEN, description: str | NotGiven = NOT_GIVEN, instruction: str | NotGiven = NOT_GIVEN, k: int | NotGiven = NOT_GIVEN, @@ -700,17 +770,39 @@ async def update( response body is a JSON object containing the agent. 
Args: + anthropic_key_uuid: Optional anthropic key uuid for use with anthropic models + + conversation_logs_enabled: Optional update of conversation logs enabled + + description: Agent description + instruction: Agent instruction. Instructions help your agent to perform its job effectively. See [Write Effective Agent Instructions](https://docs.digitalocean.com/products/genai-platform/concepts/best-practices/#agent-instructions) for best practices. + k: How many results should be considered from an attached knowledge base + max_tokens: Specifies the maximum number of tokens the model can process in a single input or output, set as a number between 1 and 512. This determines the length of each response. model_uuid: Identifier for the foundation model. + name: Agent name + + openai_key_uuid: Optional OpenAI key uuid for use with OpenAI models + + project_id: The id of the DigitalOcean project this agent will belong to + + retrieval_method: - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown + - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite + - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back + - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries + - RETRIEVAL_METHOD_NONE: The retrieval method is none + + tags: A set of abitrary tags to organize your agent + temperature: Controls the model’s creativity, specified as a number between 0 and 1. Lower values produce more predictable and conservative responses, while higher values encourage creativity and variation. @@ -719,6 +811,8 @@ async def update( number between 0 and 1. Higher values allow for more diverse outputs, while lower values ensure focused and coherent responses. 
+ body_uuid: Unique agent id + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -736,6 +830,7 @@ async def update( body=await async_maybe_transform( { "anthropic_key_uuid": anthropic_key_uuid, + "conversation_logs_enabled": conversation_logs_enabled, "description": description, "instruction": instruction, "k": k, @@ -776,11 +871,11 @@ async def list( To list all agents, send a GET request to `/v2/gen-ai/agents`. Args: - only_deployed: only list agents that are deployed. + only_deployed: Only list agents that are deployed. - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -865,6 +960,17 @@ async def update_status( PUT request to `/v2/gen-ai/agents/{uuid}/deployment_visibility`. Args: + body_uuid: Unique id + + visibility: - VISIBILITY_UNKNOWN: The status of the deployment is unknown + - VISIBILITY_DISABLED: The deployment is disabled and will no longer service + requests + - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state + - VISIBILITY_PUBLIC: The deployment is public and will service requests from the + public internet + - VISIBILITY_PRIVATE: The deployment is private and will only service requests + from other agents, or through API keys + extra_headers: Send extra headers extra_query: Add additional query parameters to the request diff --git a/src/gradientai/resources/agents/api_keys.py b/src/gradientai/resources/agents/api_keys.py index 1cf2278e..9f4d9660 100644 --- a/src/gradientai/resources/agents/api_keys.py +++ b/src/gradientai/resources/agents/api_keys.py @@ -63,6 +63,10 @@ def create( `/v2/gen-ai/agents/{agent_uuid}/api_keys`. Args: + body_agent_uuid: Agent id + + name: A human friendly name to identify the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -110,6 +114,12 @@ def update( `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. 
Args: + body_agent_uuid: Agent id + + body_api_key_uuid: API key ID + + name: Name + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -158,9 +168,9 @@ def list( `/v2/gen-ai/agents/{agent_uuid}/api_keys`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -309,6 +319,10 @@ async def create( `/v2/gen-ai/agents/{agent_uuid}/api_keys`. Args: + body_agent_uuid: Agent id + + name: A human friendly name to identify the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -356,6 +370,12 @@ async def update( `/v2/gen-ai/agents/{agent_uuid}/api_keys/{api_key_uuid}`. Args: + body_agent_uuid: Agent id + + body_api_key_uuid: API key ID + + name: Name + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -404,9 +424,9 @@ async def list( `/v2/gen-ai/agents/{agent_uuid}/api_keys`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers diff --git a/src/gradientai/resources/agents/chat/completions.py b/src/gradientai/resources/agents/chat/completions.py index 604bffb3..96a6d843 100644 --- a/src/gradientai/resources/agents/chat/completions.py +++ b/src/gradientai/resources/agents/chat/completions.py @@ -66,6 +66,8 @@ def create( Optional[completion_create_params.StreamOptions] | NotGiven ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -138,6 +140,19 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. 
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -181,6 +196,8 @@ def create( Optional[completion_create_params.StreamOptions] | NotGiven ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -253,6 +270,19 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. 
Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -296,6 +326,8 @@ def create( Optional[completion_create_params.StreamOptions] | NotGiven ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -368,6 +400,19 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
@@ -414,6 +459,8 @@ def create( Optional[completion_create_params.StreamOptions] | NotGiven ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -454,6 +501,8 @@ def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, "top_logprobs": top_logprobs, "top_p": top_p, "user": user, @@ -516,6 +565,8 @@ async def create( Optional[completion_create_params.StreamOptions] | NotGiven ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -588,6 +639,19 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. 
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -631,6 +695,8 @@ async def create( Optional[completion_create_params.StreamOptions] | NotGiven ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -703,6 +769,19 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
@@ -746,6 +825,8 @@ async def create( Optional[completion_create_params.StreamOptions] | NotGiven ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -818,6 +899,19 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
@@ -861,6 +955,8 @@ async def create( Optional[completion_create_params.StreamOptions] | NotGiven ) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -901,6 +997,8 @@ async def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, "top_logprobs": top_logprobs, "top_p": top_p, "user": user, diff --git a/src/gradientai/resources/agents/evaluation_metrics/__init__.py b/src/gradientai/resources/agents/evaluation_metrics/__init__.py index 1c0ec1ea..ce687621 100644 --- a/src/gradientai/resources/agents/evaluation_metrics/__init__.py +++ b/src/gradientai/resources/agents/evaluation_metrics/__init__.py @@ -1,5 +1,13 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+from .models import ( + ModelsResource, + AsyncModelsResource, + ModelsResourceWithRawResponse, + AsyncModelsResourceWithRawResponse, + ModelsResourceWithStreamingResponse, + AsyncModelsResourceWithStreamingResponse, +) from .workspaces import ( WorkspacesResource, AsyncWorkspacesResource, @@ -24,6 +32,12 @@ "AsyncWorkspacesResourceWithRawResponse", "WorkspacesResourceWithStreamingResponse", "AsyncWorkspacesResourceWithStreamingResponse", + "ModelsResource", + "AsyncModelsResource", + "ModelsResourceWithRawResponse", + "AsyncModelsResourceWithRawResponse", + "ModelsResourceWithStreamingResponse", + "AsyncModelsResourceWithStreamingResponse", "EvaluationMetricsResource", "AsyncEvaluationMetricsResource", "EvaluationMetricsResourceWithRawResponse", diff --git a/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py index ce549527..edf708df 100644 --- a/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py +++ b/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py @@ -4,7 +4,16 @@ import httpx +from .models import ( + ModelsResource, + AsyncModelsResource, + ModelsResourceWithRawResponse, + AsyncModelsResourceWithRawResponse, + ModelsResourceWithStreamingResponse, + AsyncModelsResourceWithStreamingResponse, +) from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import ( @@ -14,6 +23,7 @@ async_to_streamed_response_wrapper, ) from ...._base_client import make_request_options +from ....types.agents import evaluation_metric_list_regions_params from .workspaces.workspaces import ( WorkspacesResource, AsyncWorkspacesResource, @@ -23,6 +33,7 @@ AsyncWorkspacesResourceWithStreamingResponse, ) from ....types.agents.evaluation_metric_list_response import 
EvaluationMetricListResponse +from ....types.agents.evaluation_metric_list_regions_response import EvaluationMetricListRegionsResponse __all__ = ["EvaluationMetricsResource", "AsyncEvaluationMetricsResource"] @@ -32,6 +43,10 @@ class EvaluationMetricsResource(SyncAPIResource): def workspaces(self) -> WorkspacesResource: return WorkspacesResource(self._client) + @cached_property + def models(self) -> ModelsResource: + return ModelsResource(self._client) + @cached_property def with_raw_response(self) -> EvaluationMetricsResourceWithRawResponse: """ @@ -75,12 +90,64 @@ def list( cast_to=EvaluationMetricListResponse, ) + def list_regions( + self, + *, + serves_batch: bool | NotGiven = NOT_GIVEN, + serves_inference: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationMetricListRegionsResponse: + """ + To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. + + Args: + serves_batch: Include datacenters that are capable of running batch jobs. + + serves_inference: Include datacenters that serve inference. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/regions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/regions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "serves_batch": serves_batch, + "serves_inference": serves_inference, + }, + evaluation_metric_list_regions_params.EvaluationMetricListRegionsParams, + ), + ), + cast_to=EvaluationMetricListRegionsResponse, + ) + class AsyncEvaluationMetricsResource(AsyncAPIResource): @cached_property def workspaces(self) -> AsyncWorkspacesResource: return AsyncWorkspacesResource(self._client) + @cached_property + def models(self) -> AsyncModelsResource: + return AsyncModelsResource(self._client) + @cached_property def with_raw_response(self) -> AsyncEvaluationMetricsResourceWithRawResponse: """ @@ -124,6 +191,54 @@ async def list( cast_to=EvaluationMetricListResponse, ) + async def list_regions( + self, + *, + serves_batch: bool | NotGiven = NOT_GIVEN, + serves_inference: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> EvaluationMetricListRegionsResponse: + """ + To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. + + Args: + serves_batch: Include datacenters that are capable of running batch jobs. 
+ + serves_inference: Include datacenters that serve inference. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/regions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/regions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "serves_batch": serves_batch, + "serves_inference": serves_inference, + }, + evaluation_metric_list_regions_params.EvaluationMetricListRegionsParams, + ), + ), + cast_to=EvaluationMetricListRegionsResponse, + ) + class EvaluationMetricsResourceWithRawResponse: def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: @@ -132,11 +247,18 @@ def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: self.list = to_raw_response_wrapper( evaluation_metrics.list, ) + self.list_regions = to_raw_response_wrapper( + evaluation_metrics.list_regions, + ) @cached_property def workspaces(self) -> WorkspacesResourceWithRawResponse: return WorkspacesResourceWithRawResponse(self._evaluation_metrics.workspaces) + @cached_property + def models(self) -> ModelsResourceWithRawResponse: + return ModelsResourceWithRawResponse(self._evaluation_metrics.models) + class AsyncEvaluationMetricsResourceWithRawResponse: def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: @@ -145,11 +267,18 @@ def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: self.list = async_to_raw_response_wrapper( evaluation_metrics.list, ) + self.list_regions = async_to_raw_response_wrapper( + evaluation_metrics.list_regions, + ) @cached_property def workspaces(self) -> 
AsyncWorkspacesResourceWithRawResponse: return AsyncWorkspacesResourceWithRawResponse(self._evaluation_metrics.workspaces) + @cached_property + def models(self) -> AsyncModelsResourceWithRawResponse: + return AsyncModelsResourceWithRawResponse(self._evaluation_metrics.models) + class EvaluationMetricsResourceWithStreamingResponse: def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: @@ -158,11 +287,18 @@ def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None: self.list = to_streamed_response_wrapper( evaluation_metrics.list, ) + self.list_regions = to_streamed_response_wrapper( + evaluation_metrics.list_regions, + ) @cached_property def workspaces(self) -> WorkspacesResourceWithStreamingResponse: return WorkspacesResourceWithStreamingResponse(self._evaluation_metrics.workspaces) + @cached_property + def models(self) -> ModelsResourceWithStreamingResponse: + return ModelsResourceWithStreamingResponse(self._evaluation_metrics.models) + class AsyncEvaluationMetricsResourceWithStreamingResponse: def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: @@ -171,7 +307,14 @@ def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None: self.list = async_to_streamed_response_wrapper( evaluation_metrics.list, ) + self.list_regions = async_to_streamed_response_wrapper( + evaluation_metrics.list_regions, + ) @cached_property def workspaces(self) -> AsyncWorkspacesResourceWithStreamingResponse: return AsyncWorkspacesResourceWithStreamingResponse(self._evaluation_metrics.workspaces) + + @cached_property + def models(self) -> AsyncModelsResourceWithStreamingResponse: + return AsyncModelsResourceWithStreamingResponse(self._evaluation_metrics.models) diff --git a/src/gradientai/resources/agents/evaluation_metrics/models.py b/src/gradientai/resources/agents/evaluation_metrics/models.py new file mode 100644 index 00000000..20a44a22 --- /dev/null +++ 
b/src/gradientai/resources/agents/evaluation_metrics/models.py @@ -0,0 +1,254 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.agents.evaluation_metrics import model_list_params +from ....types.agents.evaluation_metrics.model_list_response import ModelListResponse + +__all__ = ["ModelsResource", "AsyncModelsResource"] + + +class ModelsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ModelsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ModelsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ModelsResourceWithStreamingResponse(self) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + public_only: bool | NotGiven = NOT_GIVEN, + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelListResponse: + """ + To list all models, send a GET request to `/v2/gen-ai/models`. + + Args: + page: Page number. + + per_page: Items per page. + + public_only: Only include models that are publicly available. + + usecases: Include only models defined for the listed usecases. 
+ + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model maybe used in an agent + - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/gen-ai/models" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/models", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + "public_only": public_only, + "usecases": usecases, + }, + model_list_params.ModelListParams, + ), + ), + cast_to=ModelListResponse, + ) + + +class AsyncModelsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncModelsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncModelsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncModelsResourceWithStreamingResponse(self) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + public_only: bool | NotGiven = NOT_GIVEN, + usecases: List[ + Literal[ + "MODEL_USECASE_UNKNOWN", + "MODEL_USECASE_AGENT", + "MODEL_USECASE_FINETUNED", + "MODEL_USECASE_KNOWLEDGEBASE", + "MODEL_USECASE_GUARDRAIL", + "MODEL_USECASE_REASONING", + "MODEL_USECASE_SERVERLESS", + ] + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelListResponse: + """ + To list all models, send a GET request to `/v2/gen-ai/models`. + + Args: + page: Page number. + + per_page: Items per page. + + public_only: Only include models that are publicly available. + + usecases: Include only models defined for the listed usecases. 
+ + - MODEL_USECASE_UNKNOWN: The use case of the model is unknown + - MODEL_USECASE_AGENT: The model maybe used in an agent + - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning + - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases + (embedding models) + - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails + - MODEL_USECASE_REASONING: The model usecase for reasoning + - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/gen-ai/models" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/gen-ai/models", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + "public_only": public_only, + "usecases": usecases, + }, + model_list_params.ModelListParams, + ), + ), + cast_to=ModelListResponse, + ) + + +class ModelsResourceWithRawResponse: + def __init__(self, models: ModelsResource) -> None: + self._models = models + + self.list = to_raw_response_wrapper( + models.list, + ) + + +class AsyncModelsResourceWithRawResponse: + def __init__(self, models: AsyncModelsResource) -> None: + self._models = models + + self.list = async_to_raw_response_wrapper( + models.list, + ) + + +class ModelsResourceWithStreamingResponse: + def __init__(self, models: ModelsResource) -> None: + self._models = models + + self.list = to_streamed_response_wrapper( + models.list, + ) + + +class AsyncModelsResourceWithStreamingResponse: + def __init__(self, models: AsyncModelsResource) -> None: + self._models = models + + self.list = async_to_streamed_response_wrapper( + 
models.list, + ) diff --git a/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py b/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py index 1e11739f..a5e68a45 100644 --- a/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py +++ b/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py @@ -48,7 +48,6 @@ def list( self, workspace_uuid: str, *, - field_mask: agent_list_params.FieldMask | NotGiven = NOT_GIVEN, only_deployed: bool | NotGiven = NOT_GIVEN, page: int | NotGiven = NOT_GIVEN, per_page: int | NotGiven = NOT_GIVEN, @@ -66,9 +65,9 @@ def list( Args: only_deployed: Only list agents that are deployed. - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -91,7 +90,6 @@ def list( timeout=timeout, query=maybe_transform( { - "field_mask": field_mask, "only_deployed": only_deployed, "page": page, "per_page": per_page, @@ -116,10 +114,14 @@ def move( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AgentMoveResponse: """ - To move all listed agetns a given workspace, send a PUT request to + To move all listed agents to a given workspace, send a PUT request to `/v2/gen-ai/workspaces/{workspace_uuid}/agents`. Args: + agent_uuids: Agent uuids + + body_workspace_uuid: Workspace uuid to move agents to + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -174,7 +176,6 @@ async def list( self, workspace_uuid: str, *, - field_mask: agent_list_params.FieldMask | NotGiven = NOT_GIVEN, only_deployed: bool | NotGiven = NOT_GIVEN, page: int | NotGiven = NOT_GIVEN, per_page: int | NotGiven = NOT_GIVEN, @@ -192,9 +193,9 @@ async def list( Args: only_deployed: Only list agents that are deployed. - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. 
 extra_headers: Send extra headers @@ -217,7 +218,6 @@ async def list( timeout=timeout, query=await async_maybe_transform( { - "field_mask": field_mask, "only_deployed": only_deployed, "page": page, "per_page": per_page, @@ -242,10 +242,14 @@ async def move( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AgentMoveResponse: """ - To move all listed agetns a given workspace, send a PUT request to + To move all listed agents to a given workspace, send a PUT request to `/v2/gen-ai/workspaces/{workspace_uuid}/agents`. Args: + agent_uuids: Agent uuids + + body_workspace_uuid: Workspace uuid to move agents to + extra_headers: Send extra headers extra_query: Add additional query parameters to the request diff --git a/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py b/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py index 0f506118..cb213e1d 100644 --- a/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py +++ b/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py @@ -81,6 +81,12 @@ def create( response body contains a JSON object with the newly created workspace object. Args: + agent_uuids: Ids of the agent(s) to attach to the workspace + + description: Description of the workspace + + name: Name of the workspace + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -164,6 +170,10 @@ def update( containing the workspace. Args: + description: The new description of the workspace + + name: The new name of the workspace + body_workspace_uuid: Workspace UUID. extra_headers: Send extra headers @@ -333,6 +343,12 @@ async def create( response body contains a JSON object with the newly created workspace object. 
 Args: + agent_uuids: Ids of the agent(s) to attach to the workspace + + description: Description of the workspace + + name: Name of the workspace + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -416,6 +432,10 @@ async def update( containing the workspace. Args: + description: The new description of the workspace + + name: The new name of the workspace + body_workspace_uuid: Workspace UUID. extra_headers: Send extra headers diff --git a/src/gradientai/resources/agents/evaluation_runs.py b/src/gradientai/resources/agents/evaluation_runs.py index 47045132..c5ea2520 100644 --- a/src/gradientai/resources/agents/evaluation_runs.py +++ b/src/gradientai/resources/agents/evaluation_runs.py @@ -17,7 +17,7 @@ async_to_streamed_response_wrapper, ) from ..._base_client import make_request_options -from ...types.agents import evaluation_run_create_params +from ...types.agents import evaluation_run_create_params, evaluation_run_list_results_params from ...types.agents.evaluation_run_create_response import EvaluationRunCreateResponse from ...types.agents.evaluation_run_retrieve_response import EvaluationRunRetrieveResponse from ...types.agents.evaluation_run_list_results_response import EvaluationRunListResultsResponse @@ -68,6 +68,8 @@ def create( run_name: The name of the run. + test_case_uuid: Test-case UUID to run + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -136,6 +138,8 @@ def list_results( self, evaluation_run_uuid: str, *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -148,6 +152,10 @@ def list_results( `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`. 
Args: + page: Page number. + + per_page: Items per page. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -165,7 +173,17 @@ def list_results( if self._client._base_url_overridden else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + evaluation_run_list_results_params.EvaluationRunListResultsParams, + ), ), cast_to=EvaluationRunListResultsResponse, ) @@ -252,6 +270,8 @@ async def create( run_name: The name of the run. + test_case_uuid: Test-case UUID to run + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -320,6 +340,8 @@ async def list_results( self, evaluation_run_uuid: str, *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -332,6 +354,10 @@ async def list_results( `/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results`. Args: + page: Page number. + + per_page: Items per page. 
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -349,7 +375,17 @@ async def list_results( if self._client._base_url_overridden else f"https://api.digitalocean.com/v2/gen-ai/evaluation_runs/{evaluation_run_uuid}/results", options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + evaluation_run_list_results_params.EvaluationRunListResultsParams, + ), ), cast_to=EvaluationRunListResultsResponse, ) diff --git a/src/gradientai/resources/agents/evaluation_test_cases.py b/src/gradientai/resources/agents/evaluation_test_cases.py index beff8752..e33f9f91 100644 --- a/src/gradientai/resources/agents/evaluation_test_cases.py +++ b/src/gradientai/resources/agents/evaluation_test_cases.py @@ -179,7 +179,7 @@ def update( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> EvaluationTestCaseUpdateResponse: """ - To update an evaluation test-case send a POST request to + To update an evaluation test-case send a PUT request to `/v2/gen-ai/evaluation_test_cases/{test_case_uuid}`. Args: @@ -189,6 +189,8 @@ def update( name: Name of the test case. 
+ body_test_case_uuid: Test-case UUID to update + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -201,7 +203,7 @@ def update( raise ValueError( f"Expected a non-empty value for `path_test_case_uuid` but received {path_test_case_uuid!r}" ) - return self._post( + return self._put( f"/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}" if self._client._base_url_overridden else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}", @@ -439,7 +441,7 @@ async def update( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> EvaluationTestCaseUpdateResponse: """ - To update an evaluation test-case send a POST request to + To update an evaluation test-case send a PUT request to `/v2/gen-ai/evaluation_test_cases/{test_case_uuid}`. Args: @@ -449,6 +451,8 @@ async def update( name: Name of the test case. + body_test_case_uuid: Test-case UUID to update + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -461,7 +465,7 @@ async def update( raise ValueError( f"Expected a non-empty value for `path_test_case_uuid` but received {path_test_case_uuid!r}" ) - return await self._post( + return await self._put( f"/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}" if self._client._base_url_overridden else f"https://api.digitalocean.com/v2/gen-ai/evaluation_test_cases/{path_test_case_uuid}", diff --git a/src/gradientai/resources/agents/functions.py b/src/gradientai/resources/agents/functions.py index 8c5f3f49..1c5b2015 100644 --- a/src/gradientai/resources/agents/functions.py +++ b/src/gradientai/resources/agents/functions.py @@ -66,6 +66,20 @@ def create( `/v2/gen-ai/agents/{agent_uuid}/functions`. 
 Args: + body_agent_uuid: Agent id + + description: Function description + + faas_name: The name of the function in the DigitalOcean functions platform + + faas_namespace: The namespace of the function in the DigitalOcean functions platform + + function_name: Function name + + input_schema: Describe the input schema for the function so the agent may call it + + output_schema: Describe the output schema for the function so the agent can handle its response + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request @@ -123,6 +137,22 @@ def update( `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. Args: + body_agent_uuid: Agent id + + description: Function description + + faas_name: The name of the function in the DigitalOcean functions platform + + faas_namespace: The namespace of the function in the DigitalOcean functions platform + + function_name: Function name + + body_function_uuid: Function id + + input_schema: Describe the input schema for the function so the agent may call it + + output_schema: Describe the output schema for the function so the agent can handle its response + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request @@ -241,6 +271,20 @@ async def create( `/v2/gen-ai/agents/{agent_uuid}/functions`. Args: + body_agent_uuid: Agent id + + description: Function description + + faas_name: The name of the function in the DigitalOcean functions platform + + faas_namespace: The namespace of the function in the DigitalOcean functions platform + + function_name: Function name + + input_schema: Describe the input schema for the function so the agent may call it + + output_schema: Describe the output schema for the function so the agent can handle its response + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request @@ -298,6 +342,22 @@ async def update( `/v2/gen-ai/agents/{agent_uuid}/functions/{function_uuid}`. 
 Args: + body_agent_uuid: Agent id + + description: Function description + + faas_name: The name of the function in the DigitalOcean functions platform + + faas_namespace: The namespace of the function in the DigitalOcean functions platform + + function_name: Function name + + body_function_uuid: Function id + + input_schema: Describe the input schema for the function so the agent may call it + + output_schema: Describe the output schema for the function so the agent can handle its response + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request diff --git a/src/gradientai/resources/agents/routes.py b/src/gradientai/resources/agents/routes.py index ed25d795..a7a298f2 100644 --- a/src/gradientai/resources/agents/routes.py +++ b/src/gradientai/resources/agents/routes.py @@ -66,8 +66,16 @@ def update( `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. Args: + body_child_agent_uuid: Routed agent id + + if_case: Describes the case in which the child agent should be used + body_parent_agent_uuid: A unique identifier for the parent agent. + route_name: Route name + + uuid: Unique id of linkage + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -164,8 +172,12 @@ def add( `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. Args: + body_child_agent_uuid: Routed agent id + body_parent_agent_uuid: A unique identifier for the parent agent. + route_name: Name of route + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -280,8 +292,16 @@ async def update( `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. Args: + body_child_agent_uuid: Routed agent id + + if_case: Describes the case in which the child agent should be used + body_parent_agent_uuid: A unique identifier for the parent agent. 
+ route_name: Route name + + uuid: Unique id of linkage + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -378,8 +398,12 @@ async def add( `/v2/gen-ai/agents/{parent_agent_uuid}/child_agents/{child_agent_uuid}`. Args: + body_child_agent_uuid: Routed agent id + body_parent_agent_uuid: A unique identifier for the parent agent. + route_name: Name of route + extra_headers: Send extra headers extra_query: Add additional query parameters to the request diff --git a/src/gradientai/resources/agents/versions.py b/src/gradientai/resources/agents/versions.py index 65a35472..77eabea9 100644 --- a/src/gradientai/resources/agents/versions.py +++ b/src/gradientai/resources/agents/versions.py @@ -60,6 +60,10 @@ def update( `/v2/gen-ai/agents/{uuid}/versions`. Args: + body_uuid: Agent unique identifier + + version_hash: Unique identifier + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -105,9 +109,9 @@ def list( `/v2/gen-ai/agents/{uuid}/versions`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -178,6 +182,10 @@ async def update( `/v2/gen-ai/agents/{uuid}/versions`. Args: + body_uuid: Agent unique identifier + + version_hash: Unique identifier + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -223,9 +231,9 @@ async def list( `/v2/gen-ai/agents/{uuid}/versions`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. 
extra_headers: Send extra headers diff --git a/src/gradientai/resources/chat/completions.py b/src/gradientai/resources/chat/completions.py index ec351ea1..ff5c25b8 100644 --- a/src/gradientai/resources/chat/completions.py +++ b/src/gradientai/resources/chat/completions.py @@ -64,6 +64,8 @@ def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -136,6 +138,19 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
@@ -178,6 +193,8 @@ def create( stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -250,6 +267,19 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
@@ -291,6 +321,8 @@ def create( stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -363,6 +395,19 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
@@ -404,6 +449,8 @@ def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -442,6 +489,8 @@ def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, "top_logprobs": top_logprobs, "top_p": top_p, "user": user, @@ -497,6 +546,8 @@ async def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -569,6 +620,19 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. 
Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. @@ -611,6 +675,8 @@ async def create( stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -683,6 +749,19 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
@@ -724,6 +803,8 @@ async def create( stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -796,6 +877,19 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + tool_choice: Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + + tools: A list of tools the model may call. Currently, only functions are supported as a + tool. + top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
@@ -837,6 +931,8 @@ async def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[completion_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -875,6 +971,8 @@ async def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, + "tool_choice": tool_choice, + "tools": tools, "top_logprobs": top_logprobs, "top_p": top_p, "user": user, diff --git a/src/gradientai/resources/gpu_droplets/__init__.py b/src/gradientai/resources/gpu_droplets/__init__.py new file mode 100644 index 00000000..064a36ce --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/__init__.py @@ -0,0 +1,187 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .sizes import ( + SizesResource, + AsyncSizesResource, + SizesResourceWithRawResponse, + AsyncSizesResourceWithRawResponse, + SizesResourceWithStreamingResponse, + AsyncSizesResourceWithStreamingResponse, +) +from .images import ( + ImagesResource, + AsyncImagesResource, + ImagesResourceWithRawResponse, + AsyncImagesResourceWithRawResponse, + ImagesResourceWithStreamingResponse, + AsyncImagesResourceWithStreamingResponse, +) +from .account import ( + AccountResource, + AsyncAccountResource, + AccountResourceWithRawResponse, + AsyncAccountResourceWithRawResponse, + AccountResourceWithStreamingResponse, + AsyncAccountResourceWithStreamingResponse, +) +from .actions import ( + ActionsResource, + AsyncActionsResource, + ActionsResourceWithRawResponse, + AsyncActionsResourceWithRawResponse, + ActionsResourceWithStreamingResponse, + AsyncActionsResourceWithStreamingResponse, +) +from .backups import ( + BackupsResource, + AsyncBackupsResource, + BackupsResourceWithRawResponse, + AsyncBackupsResourceWithRawResponse, + BackupsResourceWithStreamingResponse, + AsyncBackupsResourceWithStreamingResponse, +) +from .volumes import ( + VolumesResource, + AsyncVolumesResource, + VolumesResourceWithRawResponse, + AsyncVolumesResourceWithRawResponse, + VolumesResourceWithStreamingResponse, + AsyncVolumesResourceWithStreamingResponse, +) +from .autoscale import ( + AutoscaleResource, + AsyncAutoscaleResource, + AutoscaleResourceWithRawResponse, + AsyncAutoscaleResourceWithRawResponse, + AutoscaleResourceWithStreamingResponse, + AsyncAutoscaleResourceWithStreamingResponse, +) +from .firewalls import ( + FirewallsResource, + AsyncFirewallsResource, + FirewallsResourceWithRawResponse, + AsyncFirewallsResourceWithRawResponse, + FirewallsResourceWithStreamingResponse, + AsyncFirewallsResourceWithStreamingResponse, +) +from .snapshots import ( + SnapshotsResource, + AsyncSnapshotsResource, + SnapshotsResourceWithRawResponse, + AsyncSnapshotsResourceWithRawResponse, + 
SnapshotsResourceWithStreamingResponse, + AsyncSnapshotsResourceWithStreamingResponse, +) +from .floating_ips import ( + FloatingIPsResource, + AsyncFloatingIPsResource, + FloatingIPsResourceWithRawResponse, + AsyncFloatingIPsResourceWithRawResponse, + FloatingIPsResourceWithStreamingResponse, + AsyncFloatingIPsResourceWithStreamingResponse, +) +from .gpu_droplets import ( + GPUDropletsResource, + AsyncGPUDropletsResource, + GPUDropletsResourceWithRawResponse, + AsyncGPUDropletsResourceWithRawResponse, + GPUDropletsResourceWithStreamingResponse, + AsyncGPUDropletsResourceWithStreamingResponse, +) +from .load_balancers import ( + LoadBalancersResource, + AsyncLoadBalancersResource, + LoadBalancersResourceWithRawResponse, + AsyncLoadBalancersResourceWithRawResponse, + LoadBalancersResourceWithStreamingResponse, + AsyncLoadBalancersResourceWithStreamingResponse, +) +from .destroy_with_associated_resources import ( + DestroyWithAssociatedResourcesResource, + AsyncDestroyWithAssociatedResourcesResource, + DestroyWithAssociatedResourcesResourceWithRawResponse, + AsyncDestroyWithAssociatedResourcesResourceWithRawResponse, + DestroyWithAssociatedResourcesResourceWithStreamingResponse, + AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse, +) + +__all__ = [ + "BackupsResource", + "AsyncBackupsResource", + "BackupsResourceWithRawResponse", + "AsyncBackupsResourceWithRawResponse", + "BackupsResourceWithStreamingResponse", + "AsyncBackupsResourceWithStreamingResponse", + "ActionsResource", + "AsyncActionsResource", + "ActionsResourceWithRawResponse", + "AsyncActionsResourceWithRawResponse", + "ActionsResourceWithStreamingResponse", + "AsyncActionsResourceWithStreamingResponse", + "DestroyWithAssociatedResourcesResource", + "AsyncDestroyWithAssociatedResourcesResource", + "DestroyWithAssociatedResourcesResourceWithRawResponse", + "AsyncDestroyWithAssociatedResourcesResourceWithRawResponse", + "DestroyWithAssociatedResourcesResourceWithStreamingResponse", + 
"AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse", + "AutoscaleResource", + "AsyncAutoscaleResource", + "AutoscaleResourceWithRawResponse", + "AsyncAutoscaleResourceWithRawResponse", + "AutoscaleResourceWithStreamingResponse", + "AsyncAutoscaleResourceWithStreamingResponse", + "FirewallsResource", + "AsyncFirewallsResource", + "FirewallsResourceWithRawResponse", + "AsyncFirewallsResourceWithRawResponse", + "FirewallsResourceWithStreamingResponse", + "AsyncFirewallsResourceWithStreamingResponse", + "FloatingIPsResource", + "AsyncFloatingIPsResource", + "FloatingIPsResourceWithRawResponse", + "AsyncFloatingIPsResourceWithRawResponse", + "FloatingIPsResourceWithStreamingResponse", + "AsyncFloatingIPsResourceWithStreamingResponse", + "ImagesResource", + "AsyncImagesResource", + "ImagesResourceWithRawResponse", + "AsyncImagesResourceWithRawResponse", + "ImagesResourceWithStreamingResponse", + "AsyncImagesResourceWithStreamingResponse", + "LoadBalancersResource", + "AsyncLoadBalancersResource", + "LoadBalancersResourceWithRawResponse", + "AsyncLoadBalancersResourceWithRawResponse", + "LoadBalancersResourceWithStreamingResponse", + "AsyncLoadBalancersResourceWithStreamingResponse", + "SizesResource", + "AsyncSizesResource", + "SizesResourceWithRawResponse", + "AsyncSizesResourceWithRawResponse", + "SizesResourceWithStreamingResponse", + "AsyncSizesResourceWithStreamingResponse", + "SnapshotsResource", + "AsyncSnapshotsResource", + "SnapshotsResourceWithRawResponse", + "AsyncSnapshotsResourceWithRawResponse", + "SnapshotsResourceWithStreamingResponse", + "AsyncSnapshotsResourceWithStreamingResponse", + "VolumesResource", + "AsyncVolumesResource", + "VolumesResourceWithRawResponse", + "AsyncVolumesResourceWithRawResponse", + "VolumesResourceWithStreamingResponse", + "AsyncVolumesResourceWithStreamingResponse", + "AccountResource", + "AsyncAccountResource", + "AccountResourceWithRawResponse", + "AsyncAccountResourceWithRawResponse", + 
"AccountResourceWithStreamingResponse", + "AsyncAccountResourceWithStreamingResponse", + "GPUDropletsResource", + "AsyncGPUDropletsResource", + "GPUDropletsResourceWithRawResponse", + "AsyncGPUDropletsResourceWithRawResponse", + "GPUDropletsResourceWithStreamingResponse", + "AsyncGPUDropletsResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/gpu_droplets/account/__init__.py b/src/gradientai/resources/gpu_droplets/account/__init__.py new file mode 100644 index 00000000..33286c3f --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/account/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from .account import ( + AccountResource, + AsyncAccountResource, + AccountResourceWithRawResponse, + AsyncAccountResourceWithRawResponse, + AccountResourceWithStreamingResponse, + AsyncAccountResourceWithStreamingResponse, +) + +__all__ = [ + "KeysResource", + "AsyncKeysResource", + "KeysResourceWithRawResponse", + "AsyncKeysResourceWithRawResponse", + "KeysResourceWithStreamingResponse", + "AsyncKeysResourceWithStreamingResponse", + "AccountResource", + "AsyncAccountResource", + "AccountResourceWithRawResponse", + "AsyncAccountResourceWithRawResponse", + "AccountResourceWithStreamingResponse", + "AsyncAccountResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/gpu_droplets/account/account.py b/src/gradientai/resources/gpu_droplets/account/account.py new file mode 100644 index 00000000..d61fb68b --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/account/account.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .keys import ( + KeysResource, + AsyncKeysResource, + KeysResourceWithRawResponse, + AsyncKeysResourceWithRawResponse, + KeysResourceWithStreamingResponse, + AsyncKeysResourceWithStreamingResponse, +) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource + +__all__ = ["AccountResource", "AsyncAccountResource"] + + +class AccountResource(SyncAPIResource): + @cached_property + def keys(self) -> KeysResource: + return KeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AccountResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AccountResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AccountResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AccountResourceWithStreamingResponse(self) + + +class AsyncAccountResource(AsyncAPIResource): + @cached_property + def keys(self) -> AsyncKeysResource: + return AsyncKeysResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncAccountResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncAccountResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAccountResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncAccountResourceWithStreamingResponse(self) + + +class AccountResourceWithRawResponse: + def __init__(self, account: AccountResource) -> None: + self._account = account + + @cached_property + def keys(self) -> KeysResourceWithRawResponse: + return KeysResourceWithRawResponse(self._account.keys) + + +class AsyncAccountResourceWithRawResponse: + def __init__(self, account: AsyncAccountResource) -> None: + self._account = account + + @cached_property + def keys(self) -> AsyncKeysResourceWithRawResponse: + return AsyncKeysResourceWithRawResponse(self._account.keys) + + +class AccountResourceWithStreamingResponse: + def __init__(self, account: AccountResource) -> None: + self._account = account + + @cached_property + def keys(self) -> KeysResourceWithStreamingResponse: + return KeysResourceWithStreamingResponse(self._account.keys) + + +class AsyncAccountResourceWithStreamingResponse: + def __init__(self, account: AsyncAccountResource) -> None: + self._account = account + + @cached_property + def keys(self) -> AsyncKeysResourceWithStreamingResponse: + return AsyncKeysResourceWithStreamingResponse(self._account.keys) diff --git a/src/gradientai/resources/gpu_droplets/account/keys.py b/src/gradientai/resources/gpu_droplets/account/keys.py new file mode 100644 index 00000000..66d3bd55 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/account/keys.py @@ -0,0 +1,588 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Union + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets.account import key_list_params, key_create_params, key_update_params +from ....types.gpu_droplets.account.key_list_response import KeyListResponse +from ....types.gpu_droplets.account.key_create_response import KeyCreateResponse +from ....types.gpu_droplets.account.key_update_response import KeyUpdateResponse +from ....types.gpu_droplets.account.key_retrieve_response import KeyRetrieveResponse + +__all__ = ["KeysResource", "AsyncKeysResource"] + + +class KeysResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> KeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return KeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> KeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return KeysResourceWithStreamingResponse(self) + + def create( + self, + *, + name: str, + public_key: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To add a new SSH public key to your DigitalOcean account, send a POST request to + `/v2/account/keys`. Set the `name` attribute to the name you wish to use and the + `public_key` attribute to the full public key you are adding. + + Args: + name: A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + + public_key: The entire public key string that was uploaded. Embedded into the root user's + `authorized_keys` file if you include this key during Droplet creation. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/account/keys" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account/keys", + body=maybe_transform( + { + "name": name, + "public_key": public_key, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + def retrieve( + self, + ssh_key_identifier: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To get information about a key, send a GET request to `/v2/account/keys/$KEY_ID` + or `/v2/account/keys/$KEY_FINGERPRINT`. The response will be a JSON object with + the key `ssh_key` and value an ssh_key object which contains the standard + ssh_key attributes. + + Args: + ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH + key into a Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/account/keys/{ssh_key_identifier}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + def update( + self, + ssh_key_identifier: Union[int, str], + *, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update the name of an SSH key, send a PUT request to either + `/v2/account/keys/$SSH_KEY_ID` or `/v2/account/keys/$SSH_KEY_FINGERPRINT`. Set + the `name` attribute to the new name you want to use. 
+ + Args: + ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH + key into a Droplet. + + name: A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._put( + f"/v2/account/keys/{ssh_key_identifier}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", + body=maybe_transform({"name": name}, key_update_params.KeyUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all of the keys in your account, send a GET request to + `/v2/account/keys`. The response will be a JSON object with a key set to + `ssh_keys`. The value of this will be an array of ssh_key objects, each of which + contains the standard ssh_key attributes. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/account/keys" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + def delete( + self, + ssh_key_identifier: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To destroy a public SSH key that you have in your account, send a DELETE request + to `/v2/account/keys/$KEY_ID` or `/v2/account/keys/$KEY_FINGERPRINT`. A 204 + status will be returned, indicating that the action was successful and that the + response body is empty. + + Args: + ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH + key into a Droplet. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/account/keys/{ssh_key_identifier}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncKeysResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncKeysResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncKeysResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncKeysResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncKeysResourceWithStreamingResponse(self) + + async def create( + self, + *, + name: str, + public_key: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyCreateResponse: + """ + To add a new SSH public key to your DigitalOcean account, send a POST request to + `/v2/account/keys`. Set the `name` attribute to the name you wish to use and the + `public_key` attribute to the full public key you are adding. + + Args: + name: A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + + public_key: The entire public key string that was uploaded. Embedded into the root user's + `authorized_keys` file if you include this key during Droplet creation. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/account/keys" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account/keys", + body=await async_maybe_transform( + { + "name": name, + "public_key": public_key, + }, + key_create_params.KeyCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyCreateResponse, + ) + + async def retrieve( + self, + ssh_key_identifier: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyRetrieveResponse: + """ + To get information about a key, send a GET request to `/v2/account/keys/$KEY_ID` + or `/v2/account/keys/$KEY_FINGERPRINT`. The response will be a JSON object with + the key `ssh_key` and value an ssh_key object which contains the standard + ssh_key attributes. + + Args: + ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH + key into a Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/account/keys/{ssh_key_identifier}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyRetrieveResponse, + ) + + async def update( + self, + ssh_key_identifier: Union[int, str], + *, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyUpdateResponse: + """ + To update the name of an SSH key, send a PUT request to either + `/v2/account/keys/$SSH_KEY_ID` or `/v2/account/keys/$SSH_KEY_FINGERPRINT`. Set + the `name` attribute to the new name you want to use. 
+ + Args: + ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH + key into a Droplet. + + name: A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._put( + f"/v2/account/keys/{ssh_key_identifier}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", + body=await async_maybe_transform({"name": name}, key_update_params.KeyUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=KeyUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> KeyListResponse: + """ + To list all of the keys in your account, send a GET request to + `/v2/account/keys`. The response will be a JSON object with a key set to + `ssh_keys`. The value of this will be an array of ssh_key objects, each of which + contains the standard ssh_key attributes. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/account/keys" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/account/keys", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + key_list_params.KeyListParams, + ), + ), + cast_to=KeyListResponse, + ) + + async def delete( + self, + ssh_key_identifier: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To destroy a public SSH key that you have in your account, send a DELETE request + to `/v2/account/keys/$KEY_ID` or `/v2/account/keys/$KEY_FINGERPRINT`. A 204 + status will be returned, indicating that the action was successful and that the + response body is empty. + + Args: + ssh_key_identifier: A unique identification number for this key. Can be used to embed a specific SSH + key into a Droplet. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/account/keys/{ssh_key_identifier}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/account/keys/{ssh_key_identifier}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class KeysResourceWithRawResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_raw_response_wrapper( + keys.create, + ) + self.retrieve = to_raw_response_wrapper( + keys.retrieve, + ) + self.update = to_raw_response_wrapper( + keys.update, + ) + self.list = to_raw_response_wrapper( + keys.list, + ) + self.delete = to_raw_response_wrapper( + keys.delete, + ) + + +class AsyncKeysResourceWithRawResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_raw_response_wrapper( + keys.create, + ) + self.retrieve = async_to_raw_response_wrapper( + keys.retrieve, + ) + self.update = async_to_raw_response_wrapper( + keys.update, + ) + self.list = async_to_raw_response_wrapper( + keys.list, + ) + self.delete = async_to_raw_response_wrapper( + keys.delete, + ) + + +class KeysResourceWithStreamingResponse: + def __init__(self, keys: KeysResource) -> None: + self._keys = keys + + self.create = to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = to_streamed_response_wrapper( + keys.update, + ) + self.list = to_streamed_response_wrapper( + keys.list, + ) + self.delete = to_streamed_response_wrapper( + keys.delete, + ) + + +class 
AsyncKeysResourceWithStreamingResponse: + def __init__(self, keys: AsyncKeysResource) -> None: + self._keys = keys + + self.create = async_to_streamed_response_wrapper( + keys.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + keys.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + keys.update, + ) + self.list = async_to_streamed_response_wrapper( + keys.list, + ) + self.delete = async_to_streamed_response_wrapper( + keys.delete, + ) diff --git a/src/gradientai/resources/gpu_droplets/actions.py b/src/gradientai/resources/gpu_droplets/actions.py new file mode 100644 index 00000000..197b2ce7 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/actions.py @@ -0,0 +1,2048 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, overload + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import required_args, maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.gpu_droplets import action_list_params, action_initiate_params, action_bulk_initiate_params +from ...types.droplet_backup_policy_param import DropletBackupPolicyParam +from ...types.gpu_droplets.action_list_response import ActionListResponse +from ...types.gpu_droplets.action_initiate_response import ActionInitiateResponse +from ...types.gpu_droplets.action_retrieve_response import ActionRetrieveResponse +from ...types.gpu_droplets.action_bulk_initiate_response import ActionBulkInitiateResponse + +__all__ = ["ActionsResource", "AsyncActionsResource"] + + +class 
ActionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ActionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ActionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ActionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ActionsResourceWithStreamingResponse(self) + + def retrieve( + self, + action_id: int, + *, + droplet_id: int, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionRetrieveResponse: + """ + To retrieve a Droplet action, send a GET request to + `/v2/droplets/$DROPLET_ID/actions/$ACTION_ID`. + + The response will be a JSON object with a key called `action`. The value will be + a Droplet action object. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/droplets/{droplet_id}/actions/{action_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions/{action_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionRetrieveResponse, + ) + + def list( + self, + droplet_id: int, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionListResponse: + """ + To retrieve a list of all actions that have been executed for a Droplet, send a + GET request to `/v2/droplets/$DROPLET_ID/actions`. + + The results will be returned as a JSON object with an `actions` key. This will + be set to an array filled with `action` objects containing the standard `action` + attributes. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/droplets/{droplet_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_list_params.ActionListParams, + ), + ), + cast_to=ActionListResponse, + ) + + @overload + def bulk_initiate( + self, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + tag_name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionBulkInitiateResponse: + """Some actions can be performed in bulk on tagged Droplets. + + The actions can be + initiated by sending a POST to `/v2/droplets/actions?tag_name=$TAG_NAME` with + the action arguments. 
+ + Only a sub-set of action types are supported: + + - `power_cycle` + - `power_on` + - `power_off` + - `shutdown` + - `enable_ipv6` + - `enable_backups` + - `disable_backups` + - `snapshot` (also requires `image:create` permission) + + Args: + type: The type of action to initiate for the Droplet. + + tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or + `type`. Requires `tag:read` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def bulk_initiate( + self, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + tag_name: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionBulkInitiateResponse: + """Some actions can be performed in bulk on tagged Droplets. + + The actions can be + initiated by sending a POST to `/v2/droplets/actions?tag_name=$TAG_NAME` with + the action arguments. + + Only a sub-set of action types are supported: + + - `power_cycle` + - `power_on` + - `power_off` + - `shutdown` + - `enable_ipv6` + - `enable_backups` + - `disable_backups` + - `snapshot` (also requires `image:create` permission) + + Args: + type: The type of action to initiate for the Droplet. 
+ + tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or + `type`. Requires `tag:read` scope. + + name: The name to give the new snapshot of the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["type"]) + def bulk_initiate( + self, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + tag_name: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionBulkInitiateResponse: + return self._post( + "/v2/droplets/actions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/actions", + body=maybe_transform( + { + "type": type, + "name": name, + }, + action_bulk_initiate_params.ActionBulkInitiateParams, + ), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"tag_name": tag_name}, action_bulk_initiate_params.ActionBulkInitiateParams), + ), + cast_to=ActionBulkInitiateResponse, + ) + + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. 
A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. 
When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + backup_policy: An object specifying the backup policy for the Droplet. If omitted, the backup + plan will default to daily. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + backup_policy: An object specifying the backup policy for the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + image: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. 
A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. 
When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + image: The ID of a backup of the current Droplet instance to restore from. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + disk: bool | NotGiven = NOT_GIVEN, + size: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + disk: When `true`, the Droplet's disk will be resized in addition to its RAM and CPU. + This is a permanent change and cannot be reversed as a Droplet's disk size + cannot be decreased. + + size: The slug identifier for the size to which you wish to resize the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + image: Union[str, int] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. 
A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. 
When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + image: The image ID of a public or private image or the slug identifier for a public + image. The Droplet will be rebuilt using this image as its base. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + name: The new name for the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + kernel: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. 
A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. 
When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + kernel: A unique number used to identify and reference a specific kernel. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + name: The name to give the new snapshot of the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @required_args(["type"]) + def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + image: int | Union[str, int] | NotGiven = NOT_GIVEN, + disk: bool | NotGiven = NOT_GIVEN, + size: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + kernel: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + return self._post( + f"/v2/droplets/{droplet_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions", + body=maybe_transform( + { + "type": type, + "backup_policy": backup_policy, + "image": image, + "disk": disk, + "size": size, + "name": name, + "kernel": kernel, + }, + action_initiate_params.ActionInitiateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionInitiateResponse, + ) + + +class AsyncActionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncActionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncActionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncActionsResourceWithStreamingResponse(self) + + async def retrieve( + self, + action_id: int, + *, + droplet_id: int, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionRetrieveResponse: + """ + To retrieve a Droplet action, send a GET request to + `/v2/droplets/$DROPLET_ID/actions/$ACTION_ID`. + + The response will be a JSON object with a key called `action`. The value will be + a Droplet action object. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/droplets/{droplet_id}/actions/{action_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions/{action_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionRetrieveResponse, + ) + + async def list( + self, + droplet_id: int, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionListResponse: + """ + To retrieve a list of all actions that have been executed for a Droplet, send a + GET request to `/v2/droplets/$DROPLET_ID/actions`. + + The results will be returned as a JSON object with an `actions` key. This will + be set to an array filled with `action` objects containing the standard `action` + attributes. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/droplets/{droplet_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_list_params.ActionListParams, + ), + ), + cast_to=ActionListResponse, + ) + + @overload + async def bulk_initiate( + self, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + tag_name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionBulkInitiateResponse: + """Some actions can be performed in bulk on tagged Droplets. + + The actions can be + initiated by sending a POST to `/v2/droplets/actions?tag_name=$TAG_NAME` with + the action arguments. 
+ + Only a sub-set of action types are supported: + + - `power_cycle` + - `power_on` + - `power_off` + - `shutdown` + - `enable_ipv6` + - `enable_backups` + - `disable_backups` + - `snapshot` (also requires `image:create` permission) + + Args: + type: The type of action to initiate for the Droplet. + + tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or + `type`. Requires `tag:read` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def bulk_initiate( + self, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + tag_name: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionBulkInitiateResponse: + """Some actions can be performed in bulk on tagged Droplets. + + The actions can be + initiated by sending a POST to `/v2/droplets/actions?tag_name=$TAG_NAME` with + the action arguments. + + Only a sub-set of action types are supported: + + - `power_cycle` + - `power_on` + - `power_off` + - `shutdown` + - `enable_ipv6` + - `enable_backups` + - `disable_backups` + - `snapshot` (also requires `image:create` permission) + + Args: + type: The type of action to initiate for the Droplet. 
+ + tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or + `type`. Requires `tag:read` scope. + + name: The name to give the new snapshot of the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["type"]) + async def bulk_initiate( + self, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + tag_name: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionBulkInitiateResponse: + return await self._post( + "/v2/droplets/actions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/actions", + body=await async_maybe_transform( + { + "type": type, + "name": name, + }, + action_bulk_initiate_params.ActionBulkInitiateParams, + ), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + {"tag_name": tag_name}, action_bulk_initiate_params.ActionBulkInitiateParams + ), + ), + cast_to=ActionBulkInitiateResponse, + ) + + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. 
A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. 
When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + backup_policy: An object specifying the backup policy for the Droplet. If omitted, the backup + plan will default to daily. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + backup_policy: An object specifying the backup policy for the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + image: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. 
A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. 
When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + image: The ID of a backup of the current Droplet instance to restore from. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + disk: bool | NotGiven = NOT_GIVEN, + size: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + disk: When `true`, the Droplet's disk will be resized in addition to its RAM and CPU. + This is a permanent change and cannot be reversed as a Droplet's disk size + cannot be decreased. + + size: The slug identifier for the size to which you wish to resize the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + image: Union[str, int] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. 
A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. 
When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + image: The image ID of a public or private image or the slug identifier for a public + image. The Droplet will be rebuilt using this image as its base. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + name: The new name for the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + kernel: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. 
A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. 
When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + kernel: A unique number used to identify and reference a specific kernel. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + """ + To initiate an action on a Droplet send a POST request to + `/v2/droplets/$DROPLET_ID/actions`. 
In the JSON body to the request, set the + `type` attribute to on of the supported action types: + + | Action | Details | Additionally Required Permission | + | ----------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | + | `enable_backups` | Enables backups for a Droplet | | + | `disable_backups` | Disables backups for a Droplet | | + | `change_backup_policy` | Update the backup policy for a Droplet | | + | `reboot` | Reboots a Droplet. A `reboot` action is an attempt to reboot the Droplet in a graceful way, similar to using the `reboot` command from the console. | | + | `power_cycle` | Power cycles a Droplet. A `powercycle` action is similar to pushing the reset button on a physical machine, it's similar to booting from scratch. | | + | `shutdown` | Shutsdown a Droplet. A shutdown action is an attempt to shutdown the Droplet in a graceful way, similar to using the `shutdown` command from the console. Since a `shutdown` command can fail, this action guarantees that the command is issued, not that it succeeds. The preferred way to turn off a Droplet is to attempt a shutdown, with a reasonable timeout, followed by a `power_off` action to ensure the Droplet is off. | | + | `power_off` | Powers off a Droplet. A `power_off` event is a hard shutdown and should only be used if the `shutdown` action is not successful. It is similar to cutting the power on a server and could lead to complications. | | + | `power_on` | Powers on a Droplet. | | + | `restore` | Restore a Droplet using a backup image. 
The image ID that is passed in must be a backup of the current Droplet instance. The operation will leave any embedded SSH keys intact. | droplet:admin | + | `password_reset` | Resets the root password for a Droplet. A new password will be provided via email. It must be changed after first use. | droplet:admin | + | `resize` | Resizes a Droplet. Set the `size` attribute to a size slug. If a permanent resize with disk changes included is desired, set the `disk` attribute to `true`. | droplet:create | + | `rebuild` | Rebuilds a Droplet from a new base image. Set the `image` attribute to an image ID or slug. | droplet:admin | + | `rename` | Renames a Droplet. | | + | `change_kernel` | Changes a Droplet's kernel. Only applies to Droplets with externally managed kernels. All Droplets created after March 2017 use internal kernels by default. | | + | `enable_ipv6` | Enables IPv6 for a Droplet. Once enabled for a Droplet, IPv6 can not be disabled. When enabling IPv6 on an existing Droplet, [additional OS-level configuration](https://docs.digitalocean.com/products/networking/ipv6/how-to/enable/#on-existing-droplets) is required. | | + | `snapshot` | Takes a snapshot of a Droplet. | image:create | + + Args: + type: The type of action to initiate for the Droplet. + + name: The name to give the new snapshot of the Droplet. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @required_args(["type"]) + async def initiate( + self, + droplet_id: int, + *, + type: Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ], + backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + image: int | Union[str, int] | NotGiven = NOT_GIVEN, + disk: bool | NotGiven = NOT_GIVEN, + size: str | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + kernel: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateResponse: + return await self._post( + f"/v2/droplets/{droplet_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/actions", + body=await async_maybe_transform( + { + "type": type, + "backup_policy": backup_policy, + "image": image, + "disk": disk, + "size": size, + "name": name, + "kernel": kernel, + }, + action_initiate_params.ActionInitiateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionInitiateResponse, + ) + + +class ActionsResourceWithRawResponse: + def __init__(self, actions: ActionsResource) -> None: + self._actions = actions + + self.retrieve = to_raw_response_wrapper( + actions.retrieve, + ) + self.list = to_raw_response_wrapper( + actions.list, + ) + self.bulk_initiate = to_raw_response_wrapper( + actions.bulk_initiate, + ) + self.initiate = to_raw_response_wrapper( + 
actions.initiate, + ) + + +class AsyncActionsResourceWithRawResponse: + def __init__(self, actions: AsyncActionsResource) -> None: + self._actions = actions + + self.retrieve = async_to_raw_response_wrapper( + actions.retrieve, + ) + self.list = async_to_raw_response_wrapper( + actions.list, + ) + self.bulk_initiate = async_to_raw_response_wrapper( + actions.bulk_initiate, + ) + self.initiate = async_to_raw_response_wrapper( + actions.initiate, + ) + + +class ActionsResourceWithStreamingResponse: + def __init__(self, actions: ActionsResource) -> None: + self._actions = actions + + self.retrieve = to_streamed_response_wrapper( + actions.retrieve, + ) + self.list = to_streamed_response_wrapper( + actions.list, + ) + self.bulk_initiate = to_streamed_response_wrapper( + actions.bulk_initiate, + ) + self.initiate = to_streamed_response_wrapper( + actions.initiate, + ) + + +class AsyncActionsResourceWithStreamingResponse: + def __init__(self, actions: AsyncActionsResource) -> None: + self._actions = actions + + self.retrieve = async_to_streamed_response_wrapper( + actions.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + actions.list, + ) + self.bulk_initiate = async_to_streamed_response_wrapper( + actions.bulk_initiate, + ) + self.initiate = async_to_streamed_response_wrapper( + actions.initiate, + ) diff --git a/src/gradientai/resources/gpu_droplets/autoscale.py b/src/gradientai/resources/gpu_droplets/autoscale.py new file mode 100644 index 00000000..a1a72430 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/autoscale.py @@ -0,0 +1,967 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.gpu_droplets import ( + autoscale_list_params, + autoscale_create_params, + autoscale_update_params, + autoscale_list_history_params, + autoscale_list_members_params, +) +from ...types.gpu_droplets.autoscale_list_response import AutoscaleListResponse +from ...types.gpu_droplets.autoscale_create_response import AutoscaleCreateResponse +from ...types.gpu_droplets.autoscale_update_response import AutoscaleUpdateResponse +from ...types.gpu_droplets.autoscale_retrieve_response import AutoscaleRetrieveResponse +from ...types.gpu_droplets.autoscale_list_history_response import AutoscaleListHistoryResponse +from ...types.gpu_droplets.autoscale_list_members_response import AutoscaleListMembersResponse +from ...types.gpu_droplets.autoscale_pool_droplet_template_param import AutoscalePoolDropletTemplateParam + +__all__ = ["AutoscaleResource", "AsyncAutoscaleResource"] + + +class AutoscaleResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> AutoscaleResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AutoscaleResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AutoscaleResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AutoscaleResourceWithStreamingResponse(self) + + def create( + self, + *, + config: autoscale_create_params.Config, + droplet_template: AutoscalePoolDropletTemplateParam, + name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleCreateResponse: + """ + To create a new autoscale pool, send a POST request to `/v2/droplets/autoscale` + setting the required attributes. + + The response body will contain a JSON object with a key called `autoscale_pool` + containing the standard attributes for the new autoscale pool. + + Args: + config: The scaling configuration for an autoscale pool, which is how the pool scales up + and down (either by resource utilization or static configuration). + + name: The human-readable name of the autoscale pool. 
This field cannot be updated + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/droplets/autoscale" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/autoscale", + body=maybe_transform( + { + "config": config, + "droplet_template": droplet_template, + "name": name, + }, + autoscale_create_params.AutoscaleCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AutoscaleCreateResponse, + ) + + def retrieve( + self, + autoscale_pool_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleRetrieveResponse: + """ + To show information about an individual autoscale pool, send a GET request to + `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + return self._get( + f"/v2/droplets/autoscale/{autoscale_pool_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AutoscaleRetrieveResponse, + ) + + def update( + self, + autoscale_pool_id: str, + *, + config: autoscale_update_params.Config, + droplet_template: AutoscalePoolDropletTemplateParam, + name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleUpdateResponse: + """ + To update the configuration of an existing autoscale pool, send a PUT request to + `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID`. The request must contain a full + representation of the autoscale pool including existing attributes. + + Args: + config: The scaling configuration for an autoscale pool, which is how the pool scales up + and down (either by resource utilization or static configuration). + + name: The human-readable name of the autoscale pool. 
This field cannot be updated + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + return self._put( + f"/v2/droplets/autoscale/{autoscale_pool_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", + body=maybe_transform( + { + "config": config, + "droplet_template": droplet_template, + "name": name, + }, + autoscale_update_params.AutoscaleUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AutoscaleUpdateResponse, + ) + + def list( + self, + *, + name: str | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleListResponse: + """ + To list all autoscale pools in your team, send a GET request to + `/v2/droplets/autoscale`. The response body will be a JSON object with a key of + `autoscale_pools` containing an array of autoscale pool objects. These each + contain the standard autoscale pool attributes. + + Args: + name: The name of the autoscale pool + + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/droplets/autoscale" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/autoscale", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "name": name, + "page": page, + "per_page": per_page, + }, + autoscale_list_params.AutoscaleListParams, + ), + ), + cast_to=AutoscaleListResponse, + ) + + def delete( + self, + autoscale_pool_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To destroy an autoscale pool, send a DELETE request to the + `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID` endpoint. + + A successful response will include a 202 response code and no content. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/droplets/autoscale/{autoscale_pool_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def delete_dangerous( + self, + autoscale_pool_id: str, + *, + x_dangerous: bool, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To destroy an autoscale pool and its associated resources (Droplets), send a + DELETE request to the `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/dangerous` + endpoint. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + extra_headers.update({"X-Dangerous": ("true" if x_dangerous else "false")}) + return self._delete( + f"/v2/droplets/autoscale/{autoscale_pool_id}/dangerous" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/dangerous", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def list_history( + self, + autoscale_pool_id: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleListHistoryResponse: + """ + To list all of the scaling history events of an autoscale pool, send a GET + request to `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/history`. + + The response body will be a JSON object with a key of `history`. This will be + set to an array containing objects each representing a history event. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + return self._get( + f"/v2/droplets/autoscale/{autoscale_pool_id}/history" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/history", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + autoscale_list_history_params.AutoscaleListHistoryParams, + ), + ), + cast_to=AutoscaleListHistoryResponse, + ) + + def list_members( + self, + autoscale_pool_id: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleListMembersResponse: + """ + To list the Droplets in an autoscale pool, send a GET request to + `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/members`. + + The response body will be a JSON object with a key of `droplets`. This will be + set to an array containing information about each of the Droplets in the + autoscale pool. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + return self._get( + f"/v2/droplets/autoscale/{autoscale_pool_id}/members" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/members", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + autoscale_list_members_params.AutoscaleListMembersParams, + ), + ), + cast_to=AutoscaleListMembersResponse, + ) + + +class AsyncAutoscaleResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncAutoscaleResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncAutoscaleResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncAutoscaleResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncAutoscaleResourceWithStreamingResponse(self) + + async def create( + self, + *, + config: autoscale_create_params.Config, + droplet_template: AutoscalePoolDropletTemplateParam, + name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleCreateResponse: + """ + To create a new autoscale pool, send a POST request to `/v2/droplets/autoscale` + setting the required attributes. + + The response body will contain a JSON object with a key called `autoscale_pool` + containing the standard attributes for the new autoscale pool. + + Args: + config: The scaling configuration for an autoscale pool, which is how the pool scales up + and down (either by resource utilization or static configuration). + + name: The human-readable name of the autoscale pool. 
This field cannot be updated + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/droplets/autoscale" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/autoscale", + body=await async_maybe_transform( + { + "config": config, + "droplet_template": droplet_template, + "name": name, + }, + autoscale_create_params.AutoscaleCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AutoscaleCreateResponse, + ) + + async def retrieve( + self, + autoscale_pool_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleRetrieveResponse: + """ + To show information about an individual autoscale pool, send a GET request to + `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + return await self._get( + f"/v2/droplets/autoscale/{autoscale_pool_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AutoscaleRetrieveResponse, + ) + + async def update( + self, + autoscale_pool_id: str, + *, + config: autoscale_update_params.Config, + droplet_template: AutoscalePoolDropletTemplateParam, + name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleUpdateResponse: + """ + To update the configuration of an existing autoscale pool, send a PUT request to + `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID`. The request must contain a full + representation of the autoscale pool including existing attributes. + + Args: + config: The scaling configuration for an autoscale pool, which is how the pool scales up + and down (either by resource utilization or static configuration). + + name: The human-readable name of the autoscale pool. 
This field cannot be updated + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + return await self._put( + f"/v2/droplets/autoscale/{autoscale_pool_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", + body=await async_maybe_transform( + { + "config": config, + "droplet_template": droplet_template, + "name": name, + }, + autoscale_update_params.AutoscaleUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=AutoscaleUpdateResponse, + ) + + async def list( + self, + *, + name: str | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleListResponse: + """ + To list all autoscale pools in your team, send a GET request to + `/v2/droplets/autoscale`. The response body will be a JSON object with a key of + `autoscale_pools` containing an array of autoscale pool objects. These each + contain the standard autoscale pool attributes. + + Args: + name: The name of the autoscale pool + + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/droplets/autoscale" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/autoscale", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "name": name, + "page": page, + "per_page": per_page, + }, + autoscale_list_params.AutoscaleListParams, + ), + ), + cast_to=AutoscaleListResponse, + ) + + async def delete( + self, + autoscale_pool_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To destroy an autoscale pool, send a DELETE request to the + `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID` endpoint. + + A successful response will include a 202 response code and no content. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/droplets/autoscale/{autoscale_pool_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def delete_dangerous( + self, + autoscale_pool_id: str, + *, + x_dangerous: bool, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To destroy an autoscale pool and its associated resources (Droplets), send a + DELETE request to the `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/dangerous` + endpoint. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + extra_headers.update({"X-Dangerous": ("true" if x_dangerous else "false")}) + return await self._delete( + f"/v2/droplets/autoscale/{autoscale_pool_id}/dangerous" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/dangerous", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def list_history( + self, + autoscale_pool_id: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleListHistoryResponse: + """ + To list all of the scaling history events of an autoscale pool, send a GET + request to `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/history`. + + The response body will be a JSON object with a key of `history`. This will be + set to an array containing objects each representing a history event. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + return await self._get( + f"/v2/droplets/autoscale/{autoscale_pool_id}/history" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/history", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + autoscale_list_history_params.AutoscaleListHistoryParams, + ), + ), + cast_to=AutoscaleListHistoryResponse, + ) + + async def list_members( + self, + autoscale_pool_id: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AutoscaleListMembersResponse: + """ + To list the Droplets in an autoscale pool, send a GET request to + `/v2/droplets/autoscale/$AUTOSCALE_POOL_ID/members`. + + The response body will be a JSON object with a key of `droplets`. This will be + set to an array containing information about each of the Droplets in the + autoscale pool. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not autoscale_pool_id: + raise ValueError(f"Expected a non-empty value for `autoscale_pool_id` but received {autoscale_pool_id!r}") + return await self._get( + f"/v2/droplets/autoscale/{autoscale_pool_id}/members" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/autoscale/{autoscale_pool_id}/members", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + autoscale_list_members_params.AutoscaleListMembersParams, + ), + ), + cast_to=AutoscaleListMembersResponse, + ) + + +class AutoscaleResourceWithRawResponse: + def __init__(self, autoscale: AutoscaleResource) -> None: + self._autoscale = autoscale + + self.create = to_raw_response_wrapper( + autoscale.create, + ) + self.retrieve = to_raw_response_wrapper( + autoscale.retrieve, + ) + self.update = to_raw_response_wrapper( + autoscale.update, + ) + self.list = to_raw_response_wrapper( + autoscale.list, + ) + self.delete = to_raw_response_wrapper( + autoscale.delete, + ) + self.delete_dangerous = to_raw_response_wrapper( + autoscale.delete_dangerous, + ) + self.list_history = to_raw_response_wrapper( + autoscale.list_history, + ) + self.list_members = to_raw_response_wrapper( + autoscale.list_members, + ) + + +class AsyncAutoscaleResourceWithRawResponse: + def __init__(self, autoscale: AsyncAutoscaleResource) -> None: + self._autoscale = autoscale + + self.create = async_to_raw_response_wrapper( + autoscale.create, + ) + self.retrieve = async_to_raw_response_wrapper( + autoscale.retrieve, + ) + self.update = 
async_to_raw_response_wrapper( + autoscale.update, + ) + self.list = async_to_raw_response_wrapper( + autoscale.list, + ) + self.delete = async_to_raw_response_wrapper( + autoscale.delete, + ) + self.delete_dangerous = async_to_raw_response_wrapper( + autoscale.delete_dangerous, + ) + self.list_history = async_to_raw_response_wrapper( + autoscale.list_history, + ) + self.list_members = async_to_raw_response_wrapper( + autoscale.list_members, + ) + + +class AutoscaleResourceWithStreamingResponse: + def __init__(self, autoscale: AutoscaleResource) -> None: + self._autoscale = autoscale + + self.create = to_streamed_response_wrapper( + autoscale.create, + ) + self.retrieve = to_streamed_response_wrapper( + autoscale.retrieve, + ) + self.update = to_streamed_response_wrapper( + autoscale.update, + ) + self.list = to_streamed_response_wrapper( + autoscale.list, + ) + self.delete = to_streamed_response_wrapper( + autoscale.delete, + ) + self.delete_dangerous = to_streamed_response_wrapper( + autoscale.delete_dangerous, + ) + self.list_history = to_streamed_response_wrapper( + autoscale.list_history, + ) + self.list_members = to_streamed_response_wrapper( + autoscale.list_members, + ) + + +class AsyncAutoscaleResourceWithStreamingResponse: + def __init__(self, autoscale: AsyncAutoscaleResource) -> None: + self._autoscale = autoscale + + self.create = async_to_streamed_response_wrapper( + autoscale.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + autoscale.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + autoscale.update, + ) + self.list = async_to_streamed_response_wrapper( + autoscale.list, + ) + self.delete = async_to_streamed_response_wrapper( + autoscale.delete, + ) + self.delete_dangerous = async_to_streamed_response_wrapper( + autoscale.delete_dangerous, + ) + self.list_history = async_to_streamed_response_wrapper( + autoscale.list_history, + ) + self.list_members = async_to_streamed_response_wrapper( + autoscale.list_members, + 
) diff --git a/src/gradientai/resources/gpu_droplets/backups.py b/src/gradientai/resources/gpu_droplets/backups.py new file mode 100644 index 00000000..06fca19e --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/backups.py @@ -0,0 +1,460 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.gpu_droplets import backup_list_params, backup_list_policies_params +from ...types.gpu_droplets.backup_list_response import BackupListResponse +from ...types.gpu_droplets.backup_list_policies_response import BackupListPoliciesResponse +from ...types.gpu_droplets.backup_retrieve_policy_response import BackupRetrievePolicyResponse +from ...types.gpu_droplets.backup_list_supported_policies_response import BackupListSupportedPoliciesResponse + +__all__ = ["BackupsResource", "AsyncBackupsResource"] + + +class BackupsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> BackupsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return BackupsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> BackupsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return BackupsResourceWithStreamingResponse(self) + + def list( + self, + droplet_id: int, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> BackupListResponse: + """ + To retrieve any backups associated with a Droplet, send a GET request to + `/v2/droplets/$DROPLET_ID/backups`. + + You will get back a JSON object that has a `backups` key. This will be set to an + array of backup objects, each of which contain the standard Droplet backup + attributes. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/droplets/{droplet_id}/backups" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/backups", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + backup_list_params.BackupListParams, + ), + ), + cast_to=BackupListResponse, + ) + + def list_policies( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> BackupListPoliciesResponse: + """ + To list information about the backup policies for all Droplets in the account, + send a GET request to `/v2/droplets/backups/policies`. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/droplets/backups/policies" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/backups/policies", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + backup_list_policies_params.BackupListPoliciesParams, + ), + ), + cast_to=BackupListPoliciesResponse, + ) + + def list_supported_policies( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> BackupListSupportedPoliciesResponse: + """ + To retrieve a list of all supported Droplet backup policies, send a GET request + to `/v2/droplets/backups/supported_policies`. + """ + return self._get( + "/v2/droplets/backups/supported_policies" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/backups/supported_policies", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=BackupListSupportedPoliciesResponse, + ) + + def retrieve_policy( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> BackupRetrievePolicyResponse: + """ + To show information about an individual Droplet's backup policy, send a GET + request to `/v2/droplets/$DROPLET_ID/backups/policy`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/droplets/{droplet_id}/backups/policy" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/backups/policy", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=BackupRetrievePolicyResponse, + ) + + +class AsyncBackupsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncBackupsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncBackupsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncBackupsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncBackupsResourceWithStreamingResponse(self) + + async def list( + self, + droplet_id: int, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> BackupListResponse: + """ + To retrieve any backups associated with a Droplet, send a GET request to + `/v2/droplets/$DROPLET_ID/backups`. + + You will get back a JSON object that has a `backups` key. This will be set to an + array of backup objects, each of which contain the standard Droplet backup + attributes. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/droplets/{droplet_id}/backups" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/backups", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + backup_list_params.BackupListParams, + ), + ), + cast_to=BackupListResponse, + ) + + async def list_policies( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> BackupListPoliciesResponse: + """ + To list information about the backup policies for all Droplets in the account, + send a GET request to `/v2/droplets/backups/policies`. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/droplets/backups/policies" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/backups/policies", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + backup_list_policies_params.BackupListPoliciesParams, + ), + ), + cast_to=BackupListPoliciesResponse, + ) + + async def list_supported_policies( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> BackupListSupportedPoliciesResponse: + """ + To retrieve a list of all supported Droplet backup policies, send a GET request + to `/v2/droplets/backups/supported_policies`. + """ + return await self._get( + "/v2/droplets/backups/supported_policies" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/droplets/backups/supported_policies", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=BackupListSupportedPoliciesResponse, + ) + + async def retrieve_policy( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> BackupRetrievePolicyResponse: + """ + To show information about an individual Droplet's backup policy, send a GET + request to `/v2/droplets/$DROPLET_ID/backups/policy`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/droplets/{droplet_id}/backups/policy" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/backups/policy", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=BackupRetrievePolicyResponse, + ) + + +class BackupsResourceWithRawResponse: + def __init__(self, backups: BackupsResource) -> None: + self._backups = backups + + self.list = to_raw_response_wrapper( + backups.list, + ) + self.list_policies = to_raw_response_wrapper( + backups.list_policies, + ) + self.list_supported_policies = to_raw_response_wrapper( + backups.list_supported_policies, + ) + self.retrieve_policy = to_raw_response_wrapper( + backups.retrieve_policy, + ) + + +class AsyncBackupsResourceWithRawResponse: + def __init__(self, backups: AsyncBackupsResource) -> None: + self._backups = backups + + self.list = async_to_raw_response_wrapper( + backups.list, + ) + self.list_policies = async_to_raw_response_wrapper( + backups.list_policies, + ) + self.list_supported_policies = async_to_raw_response_wrapper( + backups.list_supported_policies, + ) + self.retrieve_policy = async_to_raw_response_wrapper( + backups.retrieve_policy, 
+ ) + + +class BackupsResourceWithStreamingResponse: + def __init__(self, backups: BackupsResource) -> None: + self._backups = backups + + self.list = to_streamed_response_wrapper( + backups.list, + ) + self.list_policies = to_streamed_response_wrapper( + backups.list_policies, + ) + self.list_supported_policies = to_streamed_response_wrapper( + backups.list_supported_policies, + ) + self.retrieve_policy = to_streamed_response_wrapper( + backups.retrieve_policy, + ) + + +class AsyncBackupsResourceWithStreamingResponse: + def __init__(self, backups: AsyncBackupsResource) -> None: + self._backups = backups + + self.list = async_to_streamed_response_wrapper( + backups.list, + ) + self.list_policies = async_to_streamed_response_wrapper( + backups.list_policies, + ) + self.list_supported_policies = async_to_streamed_response_wrapper( + backups.list_supported_policies, + ) + self.retrieve_policy = async_to_streamed_response_wrapper( + backups.retrieve_policy, + ) diff --git a/src/gradientai/resources/gpu_droplets/destroy_with_associated_resources.py b/src/gradientai/resources/gpu_droplets/destroy_with_associated_resources.py new file mode 100644 index 00000000..46db6563 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/destroy_with_associated_resources.py @@ -0,0 +1,624 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.gpu_droplets import destroy_with_associated_resource_delete_selective_params +from ...types.gpu_droplets.destroy_with_associated_resource_list_response import ( + DestroyWithAssociatedResourceListResponse, +) +from ...types.gpu_droplets.destroy_with_associated_resource_check_status_response import ( + DestroyWithAssociatedResourceCheckStatusResponse, +) + +__all__ = ["DestroyWithAssociatedResourcesResource", "AsyncDestroyWithAssociatedResourcesResource"] + + +class DestroyWithAssociatedResourcesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> DestroyWithAssociatedResourcesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return DestroyWithAssociatedResourcesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> DestroyWithAssociatedResourcesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return DestroyWithAssociatedResourcesResourceWithStreamingResponse(self) + + def list( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DestroyWithAssociatedResourceListResponse: + """ + To list the associated billable resources that can be destroyed along with a + Droplet, send a GET request to the + `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources` endpoint. + + This endpoint will only return resources that you are authorized to see. For + example, to see associated Reserved IPs, include the `reserved_ip:read` scope. + + The response will be a JSON object containing `snapshots`, `volumes`, and + `volume_snapshots` keys. Each will be set to an array of objects containing + information about the associated resources. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/droplets/{droplet_id}/destroy_with_associated_resources" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DestroyWithAssociatedResourceListResponse, + ) + + def check_status( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DestroyWithAssociatedResourceCheckStatusResponse: + """ + To check on the status of a request to destroy a Droplet with its associated + resources, send a GET request to the + `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/status` endpoint. 
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/status"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/status",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=DestroyWithAssociatedResourceCheckStatusResponse,
+        )
+
+    def delete_dangerous(
+        self,
+        droplet_id: int,
+        *,
+        x_dangerous: bool,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        To destroy a Droplet along with all of its associated resources, send a DELETE
+        request to the
+        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/dangerous` endpoint.
+        The headers of this request must include an `X-Dangerous` key set to `true`. To
+        preview which resources will be destroyed, first query the Droplet's associated
+        resources. This operation _cannot_ be reversed and should be used with caution.
+
+        A successful response will include a 202 response code and no content. Use the
+        status endpoint to check on the success or failure of the destruction of the
+        individual resources.
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + extra_headers.update({"X-Dangerous": ("true" if x_dangerous else "false")}) + return self._delete( + f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/dangerous" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/dangerous", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def delete_selective( + self, + droplet_id: int, + *, + floating_ips: List[str] | NotGiven = NOT_GIVEN, + reserved_ips: List[str] | NotGiven = NOT_GIVEN, + snapshots: List[str] | NotGiven = NOT_GIVEN, + volume_snapshots: List[str] | NotGiven = NOT_GIVEN, + volumes: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To destroy a Droplet along with a sub-set of its associated resources, send a + DELETE request to the + `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/selective` endpoint. + The JSON body of the request should include `reserved_ips`, `snapshots`, + `volumes`, or `volume_snapshots` keys each set to an array of IDs for the + associated resources to be destroyed. The IDs can be found by querying the + Droplet's associated resources. 
Any associated resource not included in the
+        request will remain and continue to accrue charges on your account.
+
+        A successful response will include a 202 response code and no content. Use the
+        status endpoint to check on the success or failure of the destruction of the
+        individual resources.
+
+        Args:
+          floating_ips: An array of unique identifiers for the floating IPs to be scheduled for
+              deletion.
+
+          reserved_ips: An array of unique identifiers for the reserved IPs to be scheduled for
+              deletion.
+
+          snapshots: An array of unique identifiers for the snapshots to be scheduled for deletion.
+
+          volume_snapshots: An array of unique identifiers for the volume snapshots to be scheduled for
+              deletion.
+
+          volumes: An array of unique identifiers for the volumes to be scheduled for deletion.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return self._delete(
+            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/selective"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/selective",
+            body=maybe_transform(
+                {
+                    "floating_ips": floating_ips,
+                    "reserved_ips": reserved_ips,
+                    "snapshots": snapshots,
+                    "volume_snapshots": volume_snapshots,
+                    "volumes": volumes,
+                },
+                destroy_with_associated_resource_delete_selective_params.DestroyWithAssociatedResourceDeleteSelectiveParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    def retry(
+        self,
+        droplet_id: int,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + If the status of a request to destroy a Droplet with its associated resources + reported any errors, it can be retried by sending a POST request to the + `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/retry` endpoint. + + Only one destroy can be active at a time per Droplet. If a retry is issued while + another destroy is in progress for the Droplet a 409 status code will be + returned. A successful response will include a 202 response code and no content. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._post( + f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/retry" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/retry", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncDestroyWithAssociatedResourcesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncDestroyWithAssociatedResourcesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncDestroyWithAssociatedResourcesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse(self) + + async def list( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DestroyWithAssociatedResourceListResponse: + """ + To list the associated billable resources that can be destroyed along with a + Droplet, send a GET request to the + `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources` endpoint. + + This endpoint will only return resources that you are authorized to see. For + example, to see associated Reserved IPs, include the `reserved_ip:read` scope. + + The response will be a JSON object containing `snapshots`, `volumes`, and + `volume_snapshots` keys. Each will be set to an array of objects containing + information about the associated resources. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/droplets/{droplet_id}/destroy_with_associated_resources" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=DestroyWithAssociatedResourceListResponse, + ) + + async def check_status( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> DestroyWithAssociatedResourceCheckStatusResponse: + """ + To check on the status of a request to destroy a Droplet with its associated + resources, send a GET request to the + `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/status` endpoint. 
+
+        Args:
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/status"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/status",
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=DestroyWithAssociatedResourceCheckStatusResponse,
+        )
+
+    async def delete_dangerous(
+        self,
+        droplet_id: int,
+        *,
+        x_dangerous: bool,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        To destroy a Droplet along with all of its associated resources, send a DELETE
+        request to the
+        `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/dangerous` endpoint.
+        The headers of this request must include an `X-Dangerous` key set to `true`. To
+        preview which resources will be destroyed, first query the Droplet's associated
+        resources. This operation _cannot_ be reversed and should be used with caution.
+
+        A successful response will include a 202 response code and no content. Use the
+        status endpoint to check on the success or failure of the destruction of the
+        individual resources.
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + extra_headers.update({"X-Dangerous": ("true" if x_dangerous else "false")}) + return await self._delete( + f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/dangerous" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/dangerous", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def delete_selective( + self, + droplet_id: int, + *, + floating_ips: List[str] | NotGiven = NOT_GIVEN, + reserved_ips: List[str] | NotGiven = NOT_GIVEN, + snapshots: List[str] | NotGiven = NOT_GIVEN, + volume_snapshots: List[str] | NotGiven = NOT_GIVEN, + volumes: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To destroy a Droplet along with a sub-set of its associated resources, send a + DELETE request to the + `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/selective` endpoint. + The JSON body of the request should include `reserved_ips`, `snapshots`, + `volumes`, or `volume_snapshots` keys each set to an array of IDs for the + associated resources to be destroyed. The IDs can be found by querying the + Droplet's associated resources. 
Any associated resource not included in the
+        request will remain and continue to accrue charges on your account.
+
+        A successful response will include a 202 response code and no content. Use the
+        status endpoint to check on the success or failure of the destruction of the
+        individual resources.
+
+        Args:
+          floating_ips: An array of unique identifiers for the floating IPs to be scheduled for
+              deletion.
+
+          reserved_ips: An array of unique identifiers for the reserved IPs to be scheduled for
+              deletion.
+
+          snapshots: An array of unique identifiers for the snapshots to be scheduled for deletion.
+
+          volume_snapshots: An array of unique identifiers for the volume snapshots to be scheduled for
+              deletion.
+
+          volumes: An array of unique identifiers for the volumes to be scheduled for deletion.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._delete(
+            f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/selective"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/selective",
+            body=await async_maybe_transform(
+                {
+                    "floating_ips": floating_ips,
+                    "reserved_ips": reserved_ips,
+                    "snapshots": snapshots,
+                    "volume_snapshots": volume_snapshots,
+                    "volumes": volumes,
+                },
+                destroy_with_associated_resource_delete_selective_params.DestroyWithAssociatedResourceDeleteSelectiveParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+            ),
+            cast_to=NoneType,
+        )
+
+    async def retry(
+        self,
+        droplet_id: int,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't
available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + If the status of a request to destroy a Droplet with its associated resources + reported any errors, it can be retried by sending a POST request to the + `/v2/droplets/$DROPLET_ID/destroy_with_associated_resources/retry` endpoint. + + Only one destroy can be active at a time per Droplet. If a retry is issued while + another destroy is in progress for the Droplet a 409 status code will be + returned. A successful response will include a 202 response code and no content. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._post( + f"/v2/droplets/{droplet_id}/destroy_with_associated_resources/retry" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/destroy_with_associated_resources/retry", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class DestroyWithAssociatedResourcesResourceWithRawResponse: + def __init__(self, destroy_with_associated_resources: DestroyWithAssociatedResourcesResource) -> None: + self._destroy_with_associated_resources = destroy_with_associated_resources + + self.list = to_raw_response_wrapper( + destroy_with_associated_resources.list, + ) + self.check_status = to_raw_response_wrapper( + destroy_with_associated_resources.check_status, + ) + self.delete_dangerous = 
to_raw_response_wrapper( + destroy_with_associated_resources.delete_dangerous, + ) + self.delete_selective = to_raw_response_wrapper( + destroy_with_associated_resources.delete_selective, + ) + self.retry = to_raw_response_wrapper( + destroy_with_associated_resources.retry, + ) + + +class AsyncDestroyWithAssociatedResourcesResourceWithRawResponse: + def __init__(self, destroy_with_associated_resources: AsyncDestroyWithAssociatedResourcesResource) -> None: + self._destroy_with_associated_resources = destroy_with_associated_resources + + self.list = async_to_raw_response_wrapper( + destroy_with_associated_resources.list, + ) + self.check_status = async_to_raw_response_wrapper( + destroy_with_associated_resources.check_status, + ) + self.delete_dangerous = async_to_raw_response_wrapper( + destroy_with_associated_resources.delete_dangerous, + ) + self.delete_selective = async_to_raw_response_wrapper( + destroy_with_associated_resources.delete_selective, + ) + self.retry = async_to_raw_response_wrapper( + destroy_with_associated_resources.retry, + ) + + +class DestroyWithAssociatedResourcesResourceWithStreamingResponse: + def __init__(self, destroy_with_associated_resources: DestroyWithAssociatedResourcesResource) -> None: + self._destroy_with_associated_resources = destroy_with_associated_resources + + self.list = to_streamed_response_wrapper( + destroy_with_associated_resources.list, + ) + self.check_status = to_streamed_response_wrapper( + destroy_with_associated_resources.check_status, + ) + self.delete_dangerous = to_streamed_response_wrapper( + destroy_with_associated_resources.delete_dangerous, + ) + self.delete_selective = to_streamed_response_wrapper( + destroy_with_associated_resources.delete_selective, + ) + self.retry = to_streamed_response_wrapper( + destroy_with_associated_resources.retry, + ) + + +class AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse: + def __init__(self, destroy_with_associated_resources: 
AsyncDestroyWithAssociatedResourcesResource) -> None: + self._destroy_with_associated_resources = destroy_with_associated_resources + + self.list = async_to_streamed_response_wrapper( + destroy_with_associated_resources.list, + ) + self.check_status = async_to_streamed_response_wrapper( + destroy_with_associated_resources.check_status, + ) + self.delete_dangerous = async_to_streamed_response_wrapper( + destroy_with_associated_resources.delete_dangerous, + ) + self.delete_selective = async_to_streamed_response_wrapper( + destroy_with_associated_resources.delete_selective, + ) + self.retry = async_to_streamed_response_wrapper( + destroy_with_associated_resources.retry, + ) diff --git a/src/gradientai/resources/gpu_droplets/firewalls/__init__.py b/src/gradientai/resources/gpu_droplets/firewalls/__init__.py new file mode 100644 index 00000000..e9cb832f --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/firewalls/__init__.py @@ -0,0 +1,61 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .tags import ( + TagsResource, + AsyncTagsResource, + TagsResourceWithRawResponse, + AsyncTagsResourceWithRawResponse, + TagsResourceWithStreamingResponse, + AsyncTagsResourceWithStreamingResponse, +) +from .rules import ( + RulesResource, + AsyncRulesResource, + RulesResourceWithRawResponse, + AsyncRulesResourceWithRawResponse, + RulesResourceWithStreamingResponse, + AsyncRulesResourceWithStreamingResponse, +) +from .droplets import ( + DropletsResource, + AsyncDropletsResource, + DropletsResourceWithRawResponse, + AsyncDropletsResourceWithRawResponse, + DropletsResourceWithStreamingResponse, + AsyncDropletsResourceWithStreamingResponse, +) +from .firewalls import ( + FirewallsResource, + AsyncFirewallsResource, + FirewallsResourceWithRawResponse, + AsyncFirewallsResourceWithRawResponse, + FirewallsResourceWithStreamingResponse, + AsyncFirewallsResourceWithStreamingResponse, +) + +__all__ = [ + "DropletsResource", + "AsyncDropletsResource", + "DropletsResourceWithRawResponse", + "AsyncDropletsResourceWithRawResponse", + "DropletsResourceWithStreamingResponse", + "AsyncDropletsResourceWithStreamingResponse", + "TagsResource", + "AsyncTagsResource", + "TagsResourceWithRawResponse", + "AsyncTagsResourceWithRawResponse", + "TagsResourceWithStreamingResponse", + "AsyncTagsResourceWithStreamingResponse", + "RulesResource", + "AsyncRulesResource", + "RulesResourceWithRawResponse", + "AsyncRulesResourceWithRawResponse", + "RulesResourceWithStreamingResponse", + "AsyncRulesResourceWithStreamingResponse", + "FirewallsResource", + "AsyncFirewallsResource", + "FirewallsResourceWithRawResponse", + "AsyncFirewallsResourceWithRawResponse", + "FirewallsResourceWithStreamingResponse", + "AsyncFirewallsResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/gpu_droplets/firewalls/droplets.py b/src/gradientai/resources/gpu_droplets/firewalls/droplets.py new file mode 100644 index 00000000..025d1ba4 --- /dev/null +++ 
b/src/gradientai/resources/gpu_droplets/firewalls/droplets.py @@ -0,0 +1,296 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets.firewalls import droplet_add_params, droplet_remove_params + +__all__ = ["DropletsResource", "AsyncDropletsResource"] + + +class DropletsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> DropletsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return DropletsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> DropletsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return DropletsResourceWithStreamingResponse(self) + + def add( + self, + firewall_id: str, + *, + droplet_ids: Iterable[int], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To assign a Droplet to a firewall, send a POST request to + `/v2/firewalls/$FIREWALL_ID/droplets`. In the body of the request, there should + be a `droplet_ids` attribute containing a list of Droplet IDs. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + droplet_ids: An array containing the IDs of the Droplets to be assigned to the firewall. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._post( + f"/v2/firewalls/{firewall_id}/droplets" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/droplets", + body=maybe_transform({"droplet_ids": droplet_ids}, droplet_add_params.DropletAddParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def remove( + self, + firewall_id: str, + *, + droplet_ids: Iterable[int], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove a Droplet from a firewall, send a DELETE request to + `/v2/firewalls/$FIREWALL_ID/droplets`. In the body of the request, there should + be a `droplet_ids` attribute containing a list of Droplet IDs. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + droplet_ids: An array containing the IDs of the Droplets to be removed from the firewall. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/firewalls/{firewall_id}/droplets" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/droplets", + body=maybe_transform({"droplet_ids": droplet_ids}, droplet_remove_params.DropletRemoveParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncDropletsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncDropletsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncDropletsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncDropletsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncDropletsResourceWithStreamingResponse(self) + + async def add( + self, + firewall_id: str, + *, + droplet_ids: Iterable[int], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To assign a Droplet to a firewall, send a POST request to + `/v2/firewalls/$FIREWALL_ID/droplets`. In the body of the request, there should + be a `droplet_ids` attribute containing a list of Droplet IDs. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + droplet_ids: An array containing the IDs of the Droplets to be assigned to the firewall. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._post( + f"/v2/firewalls/{firewall_id}/droplets" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/droplets", + body=await async_maybe_transform({"droplet_ids": droplet_ids}, droplet_add_params.DropletAddParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def remove( + self, + firewall_id: str, + *, + droplet_ids: Iterable[int], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove a Droplet from a firewall, send a DELETE request to + `/v2/firewalls/$FIREWALL_ID/droplets`. In the body of the request, there should + be a `droplet_ids` attribute containing a list of Droplet IDs. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + droplet_ids: An array containing the IDs of the Droplets to be removed from the firewall. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/firewalls/{firewall_id}/droplets" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/droplets", + body=await async_maybe_transform({"droplet_ids": droplet_ids}, droplet_remove_params.DropletRemoveParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class DropletsResourceWithRawResponse: + def __init__(self, droplets: DropletsResource) -> None: + self._droplets = droplets + + self.add = to_raw_response_wrapper( + droplets.add, + ) + self.remove = to_raw_response_wrapper( + droplets.remove, + ) + + +class AsyncDropletsResourceWithRawResponse: + def __init__(self, droplets: AsyncDropletsResource) -> None: + self._droplets = droplets + + self.add = async_to_raw_response_wrapper( + droplets.add, + ) + self.remove = async_to_raw_response_wrapper( + droplets.remove, + ) + + +class DropletsResourceWithStreamingResponse: + def __init__(self, droplets: DropletsResource) -> None: + self._droplets = droplets + + self.add = to_streamed_response_wrapper( + droplets.add, + ) + self.remove = to_streamed_response_wrapper( + droplets.remove, + ) + + +class AsyncDropletsResourceWithStreamingResponse: + def __init__(self, droplets: AsyncDropletsResource) -> None: + self._droplets = droplets + + self.add = async_to_streamed_response_wrapper( + droplets.add, + ) + self.remove = async_to_streamed_response_wrapper( + droplets.remove, + ) diff --git 
a/src/gradientai/resources/gpu_droplets/firewalls/firewalls.py b/src/gradientai/resources/gpu_droplets/firewalls/firewalls.py new file mode 100644 index 00000000..a6c21928 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/firewalls/firewalls.py @@ -0,0 +1,647 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from .tags import ( + TagsResource, + AsyncTagsResource, + TagsResourceWithRawResponse, + AsyncTagsResourceWithRawResponse, + TagsResourceWithStreamingResponse, + AsyncTagsResourceWithStreamingResponse, +) +from .rules import ( + RulesResource, + AsyncRulesResource, + RulesResourceWithRawResponse, + AsyncRulesResourceWithRawResponse, + RulesResourceWithStreamingResponse, + AsyncRulesResourceWithStreamingResponse, +) +from .droplets import ( + DropletsResource, + AsyncDropletsResource, + DropletsResourceWithRawResponse, + AsyncDropletsResourceWithRawResponse, + DropletsResourceWithStreamingResponse, + AsyncDropletsResourceWithStreamingResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets import firewall_list_params, firewall_create_params, firewall_update_params +from ....types.gpu_droplets.firewall_param import FirewallParam +from ....types.gpu_droplets.firewall_list_response import FirewallListResponse +from ....types.gpu_droplets.firewall_create_response import FirewallCreateResponse +from ....types.gpu_droplets.firewall_update_response import FirewallUpdateResponse +from ....types.gpu_droplets.firewall_retrieve_response 
import FirewallRetrieveResponse + +__all__ = ["FirewallsResource", "AsyncFirewallsResource"] + + +class FirewallsResource(SyncAPIResource): + @cached_property + def droplets(self) -> DropletsResource: + return DropletsResource(self._client) + + @cached_property + def tags(self) -> TagsResource: + return TagsResource(self._client) + + @cached_property + def rules(self) -> RulesResource: + return RulesResource(self._client) + + @cached_property + def with_raw_response(self) -> FirewallsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return FirewallsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> FirewallsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return FirewallsResourceWithStreamingResponse(self) + + def create( + self, + *, + body: firewall_create_params.Body | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FirewallCreateResponse: + """To create a new firewall, send a POST request to `/v2/firewalls`. + + The request + must contain at least one inbound or outbound access rule. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/firewalls" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/firewalls", + body=maybe_transform(body, firewall_create_params.FirewallCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FirewallCreateResponse, + ) + + def retrieve( + self, + firewall_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FirewallRetrieveResponse: + """ + To show information about an existing firewall, send a GET request to + `/v2/firewalls/$FIREWALL_ID`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + return self._get( + f"/v2/firewalls/{firewall_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FirewallRetrieveResponse, + ) + + def update( + self, + firewall_id: str, + *, + firewall: FirewallParam, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FirewallUpdateResponse: + """ + To update the configuration of an existing firewall, send a PUT request to + `/v2/firewalls/$FIREWALL_ID`. The request should contain a full representation + of the firewall including existing attributes. **Note that any attributes that + are not provided will be reset to their default values.** + + You must have read access (e.g. `droplet:read`) to all resources attached to the + firewall to successfully update the firewall. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + return self._put( + f"/v2/firewalls/{firewall_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", + body=maybe_transform(firewall, firewall_update_params.FirewallUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FirewallUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FirewallListResponse: + """ + To list all of the firewalls available on your account, send a GET request to + `/v2/firewalls`. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/firewalls" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/firewalls", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + firewall_list_params.FirewallListParams, + ), + ), + cast_to=FirewallListResponse, + ) + + def delete( + self, + firewall_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a firewall send a DELETE request to `/v2/firewalls/$FIREWALL_ID`. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/firewalls/{firewall_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncFirewallsResource(AsyncAPIResource): + @cached_property + def droplets(self) -> AsyncDropletsResource: + return AsyncDropletsResource(self._client) + + @cached_property + def tags(self) -> AsyncTagsResource: + return AsyncTagsResource(self._client) + + @cached_property + def rules(self) -> AsyncRulesResource: + return AsyncRulesResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncFirewallsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncFirewallsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncFirewallsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncFirewallsResourceWithStreamingResponse(self) + + async def create( + self, + *, + body: firewall_create_params.Body | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FirewallCreateResponse: + """To create a new firewall, send a POST request to `/v2/firewalls`. + + The request + must contain at least one inbound or outbound access rule. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/firewalls" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/firewalls", + body=await async_maybe_transform(body, firewall_create_params.FirewallCreateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FirewallCreateResponse, + ) + + async def retrieve( + self, + firewall_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FirewallRetrieveResponse: + """ + To show information about an existing firewall, send a GET request to + `/v2/firewalls/$FIREWALL_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + return await self._get( + f"/v2/firewalls/{firewall_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FirewallRetrieveResponse, + ) + + async def update( + self, + firewall_id: str, + *, + firewall: FirewallParam, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FirewallUpdateResponse: + """ + To update the configuration of an existing firewall, send a PUT request to + `/v2/firewalls/$FIREWALL_ID`. The request should contain a full representation + of the firewall including existing attributes. **Note that any attributes that + are not provided will be reset to their default values.** + + You must have read access (e.g. `droplet:read`) to all resources attached to the + firewall to successfully update the firewall. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + return await self._put( + f"/v2/firewalls/{firewall_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", + body=await async_maybe_transform(firewall, firewall_update_params.FirewallUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FirewallUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FirewallListResponse: + """ + To list all of the firewalls available on your account, send a GET request to + `/v2/firewalls`. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/firewalls" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/firewalls", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + firewall_list_params.FirewallListParams, + ), + ), + cast_to=FirewallListResponse, + ) + + async def delete( + self, + firewall_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a firewall send a DELETE request to `/v2/firewalls/$FIREWALL_ID`. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/firewalls/{firewall_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class FirewallsResourceWithRawResponse: + def __init__(self, firewalls: FirewallsResource) -> None: + self._firewalls = firewalls + + self.create = to_raw_response_wrapper( + firewalls.create, + ) + self.retrieve = to_raw_response_wrapper( + firewalls.retrieve, + ) + self.update = to_raw_response_wrapper( + firewalls.update, + ) + self.list = to_raw_response_wrapper( + firewalls.list, + ) + self.delete = to_raw_response_wrapper( + firewalls.delete, + ) + + @cached_property + def droplets(self) -> DropletsResourceWithRawResponse: + return DropletsResourceWithRawResponse(self._firewalls.droplets) + + @cached_property + def tags(self) -> TagsResourceWithRawResponse: + return TagsResourceWithRawResponse(self._firewalls.tags) + + @cached_property + def rules(self) -> RulesResourceWithRawResponse: + return RulesResourceWithRawResponse(self._firewalls.rules) + + +class AsyncFirewallsResourceWithRawResponse: + def __init__(self, firewalls: AsyncFirewallsResource) -> None: + self._firewalls = firewalls + + self.create = async_to_raw_response_wrapper( + firewalls.create, + ) + self.retrieve = async_to_raw_response_wrapper( + firewalls.retrieve, + ) + self.update = async_to_raw_response_wrapper( + firewalls.update, + ) 
+ self.list = async_to_raw_response_wrapper( + firewalls.list, + ) + self.delete = async_to_raw_response_wrapper( + firewalls.delete, + ) + + @cached_property + def droplets(self) -> AsyncDropletsResourceWithRawResponse: + return AsyncDropletsResourceWithRawResponse(self._firewalls.droplets) + + @cached_property + def tags(self) -> AsyncTagsResourceWithRawResponse: + return AsyncTagsResourceWithRawResponse(self._firewalls.tags) + + @cached_property + def rules(self) -> AsyncRulesResourceWithRawResponse: + return AsyncRulesResourceWithRawResponse(self._firewalls.rules) + + +class FirewallsResourceWithStreamingResponse: + def __init__(self, firewalls: FirewallsResource) -> None: + self._firewalls = firewalls + + self.create = to_streamed_response_wrapper( + firewalls.create, + ) + self.retrieve = to_streamed_response_wrapper( + firewalls.retrieve, + ) + self.update = to_streamed_response_wrapper( + firewalls.update, + ) + self.list = to_streamed_response_wrapper( + firewalls.list, + ) + self.delete = to_streamed_response_wrapper( + firewalls.delete, + ) + + @cached_property + def droplets(self) -> DropletsResourceWithStreamingResponse: + return DropletsResourceWithStreamingResponse(self._firewalls.droplets) + + @cached_property + def tags(self) -> TagsResourceWithStreamingResponse: + return TagsResourceWithStreamingResponse(self._firewalls.tags) + + @cached_property + def rules(self) -> RulesResourceWithStreamingResponse: + return RulesResourceWithStreamingResponse(self._firewalls.rules) + + +class AsyncFirewallsResourceWithStreamingResponse: + def __init__(self, firewalls: AsyncFirewallsResource) -> None: + self._firewalls = firewalls + + self.create = async_to_streamed_response_wrapper( + firewalls.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + firewalls.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + firewalls.update, + ) + self.list = async_to_streamed_response_wrapper( + firewalls.list, + ) + self.delete = 
async_to_streamed_response_wrapper( + firewalls.delete, + ) + + @cached_property + def droplets(self) -> AsyncDropletsResourceWithStreamingResponse: + return AsyncDropletsResourceWithStreamingResponse(self._firewalls.droplets) + + @cached_property + def tags(self) -> AsyncTagsResourceWithStreamingResponse: + return AsyncTagsResourceWithStreamingResponse(self._firewalls.tags) + + @cached_property + def rules(self) -> AsyncRulesResourceWithStreamingResponse: + return AsyncRulesResourceWithStreamingResponse(self._firewalls.rules) diff --git a/src/gradientai/resources/gpu_droplets/firewalls/rules.py b/src/gradientai/resources/gpu_droplets/firewalls/rules.py new file mode 100644 index 00000000..61026779 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/firewalls/rules.py @@ -0,0 +1,320 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable, Optional + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets.firewalls import rule_add_params, rule_remove_params + +__all__ = ["RulesResource", "AsyncRulesResource"] + + +class RulesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> RulesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return RulesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> RulesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return RulesResourceWithStreamingResponse(self) + + def add( + self, + firewall_id: str, + *, + inbound_rules: Optional[Iterable[rule_add_params.InboundRule]] | NotGiven = NOT_GIVEN, + outbound_rules: Optional[Iterable[rule_add_params.OutboundRule]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To add additional access rules to a firewall, send a POST request to + `/v2/firewalls/$FIREWALL_ID/rules`. The body of the request may include an + inbound_rules and/or outbound_rules attribute containing an array of rules to be + added. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._post( + f"/v2/firewalls/{firewall_id}/rules" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/rules", + body=maybe_transform( + { + "inbound_rules": inbound_rules, + "outbound_rules": outbound_rules, + }, + rule_add_params.RuleAddParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def remove( + self, + firewall_id: str, + *, + inbound_rules: Optional[Iterable[rule_remove_params.InboundRule]] | NotGiven = NOT_GIVEN, + outbound_rules: Optional[Iterable[rule_remove_params.OutboundRule]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove access rules from a firewall, send a DELETE request to + `/v2/firewalls/$FIREWALL_ID/rules`. The body of the request may include an + `inbound_rules` and/or `outbound_rules` attribute containing an array of rules + to be removed. + + No response body will be sent back, but the response code will indicate success. 
+ Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/firewalls/{firewall_id}/rules" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/rules", + body=maybe_transform( + { + "inbound_rules": inbound_rules, + "outbound_rules": outbound_rules, + }, + rule_remove_params.RuleRemoveParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncRulesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncRulesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncRulesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncRulesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncRulesResourceWithStreamingResponse(self) + + async def add( + self, + firewall_id: str, + *, + inbound_rules: Optional[Iterable[rule_add_params.InboundRule]] | NotGiven = NOT_GIVEN, + outbound_rules: Optional[Iterable[rule_add_params.OutboundRule]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To add additional access rules to a firewall, send a POST request to + `/v2/firewalls/$FIREWALL_ID/rules`. The body of the request may include an + inbound_rules and/or outbound_rules attribute containing an array of rules to be + added. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._post( + f"/v2/firewalls/{firewall_id}/rules" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/rules", + body=await async_maybe_transform( + { + "inbound_rules": inbound_rules, + "outbound_rules": outbound_rules, + }, + rule_add_params.RuleAddParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def remove( + self, + firewall_id: str, + *, + inbound_rules: Optional[Iterable[rule_remove_params.InboundRule]] | NotGiven = NOT_GIVEN, + outbound_rules: Optional[Iterable[rule_remove_params.OutboundRule]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove access rules from a firewall, send a DELETE request to + `/v2/firewalls/$FIREWALL_ID/rules`. The body of the request may include an + `inbound_rules` and/or `outbound_rules` attribute containing an array of rules + to be removed. + + No response body will be sent back, but the response code will indicate success. 
+ Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/firewalls/{firewall_id}/rules" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/rules", + body=await async_maybe_transform( + { + "inbound_rules": inbound_rules, + "outbound_rules": outbound_rules, + }, + rule_remove_params.RuleRemoveParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class RulesResourceWithRawResponse: + def __init__(self, rules: RulesResource) -> None: + self._rules = rules + + self.add = to_raw_response_wrapper( + rules.add, + ) + self.remove = to_raw_response_wrapper( + rules.remove, + ) + + +class AsyncRulesResourceWithRawResponse: + def __init__(self, rules: AsyncRulesResource) -> None: + self._rules = rules + + self.add = async_to_raw_response_wrapper( + rules.add, + ) + self.remove = async_to_raw_response_wrapper( + rules.remove, + ) + + +class RulesResourceWithStreamingResponse: + def __init__(self, rules: RulesResource) -> None: + self._rules = rules + + self.add = to_streamed_response_wrapper( + rules.add, + ) + self.remove = to_streamed_response_wrapper( + rules.remove, + ) + + +class AsyncRulesResourceWithStreamingResponse: + def __init__(self, rules: AsyncRulesResource) -> None: + self._rules = rules + + self.add = async_to_streamed_response_wrapper( + rules.add, + ) + 
self.remove = async_to_streamed_response_wrapper( + rules.remove, + ) diff --git a/src/gradientai/resources/gpu_droplets/firewalls/tags.py b/src/gradientai/resources/gpu_droplets/firewalls/tags.py new file mode 100644 index 00000000..725bc014 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/firewalls/tags.py @@ -0,0 +1,308 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets.firewalls import tag_add_params, tag_remove_params + +__all__ = ["TagsResource", "AsyncTagsResource"] + + +class TagsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> TagsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return TagsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> TagsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return TagsResourceWithStreamingResponse(self) + + def add( + self, + firewall_id: str, + *, + tags: Optional[List[str]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To assign a tag representing a group of Droplets to a firewall, send a POST + request to `/v2/firewalls/$FIREWALL_ID/tags`. In the body of the request, there + should be a `tags` attribute containing a list of tag names. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + tags: A flat array of tag names as strings to be applied to the resource. Tag names + must exist in order to be referenced in a request. + + Requires `tag:create` and `tag:read` scopes. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._post( + f"/v2/firewalls/{firewall_id}/tags" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/tags", + body=maybe_transform({"tags": tags}, tag_add_params.TagAddParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def remove( + self, + firewall_id: str, + *, + tags: Optional[List[str]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove a tag representing a group of Droplets from a firewall, send a DELETE + request to `/v2/firewalls/$FIREWALL_ID/tags`. In the body of the request, there + should be a `tags` attribute containing a list of tag names. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + tags: A flat array of tag names as strings to be applied to the resource. Tag names + must exist in order to be referenced in a request. + + Requires `tag:create` and `tag:read` scopes. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/firewalls/{firewall_id}/tags" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/tags", + body=maybe_transform({"tags": tags}, tag_remove_params.TagRemoveParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncTagsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncTagsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncTagsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncTagsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncTagsResourceWithStreamingResponse(self) + + async def add( + self, + firewall_id: str, + *, + tags: Optional[List[str]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To assign a tag representing a group of Droplets to a firewall, send a POST + request to `/v2/firewalls/$FIREWALL_ID/tags`. In the body of the request, there + should be a `tags` attribute containing a list of tag names. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + tags: A flat array of tag names as strings to be applied to the resource. Tag names + must exist in order to be referenced in a request. + + Requires `tag:create` and `tag:read` scopes. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._post( + f"/v2/firewalls/{firewall_id}/tags" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/tags", + body=await async_maybe_transform({"tags": tags}, tag_add_params.TagAddParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def remove( + self, + firewall_id: str, + *, + tags: Optional[List[str]], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove a tag representing a group of Droplets from a firewall, send a DELETE + request to `/v2/firewalls/$FIREWALL_ID/tags`. In the body of the request, there + should be a `tags` attribute containing a list of tag names. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + tags: A flat array of tag names as strings to be applied to the resource. Tag names + must exist in order to be referenced in a request. + + Requires `tag:create` and `tag:read` scopes. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not firewall_id: + raise ValueError(f"Expected a non-empty value for `firewall_id` but received {firewall_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/firewalls/{firewall_id}/tags" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/firewalls/{firewall_id}/tags", + body=await async_maybe_transform({"tags": tags}, tag_remove_params.TagRemoveParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class TagsResourceWithRawResponse: + def __init__(self, tags: TagsResource) -> None: + self._tags = tags + + self.add = to_raw_response_wrapper( + tags.add, + ) + self.remove = to_raw_response_wrapper( + tags.remove, + ) + + +class AsyncTagsResourceWithRawResponse: + def __init__(self, tags: AsyncTagsResource) -> 
None: + self._tags = tags + + self.add = async_to_raw_response_wrapper( + tags.add, + ) + self.remove = async_to_raw_response_wrapper( + tags.remove, + ) + + +class TagsResourceWithStreamingResponse: + def __init__(self, tags: TagsResource) -> None: + self._tags = tags + + self.add = to_streamed_response_wrapper( + tags.add, + ) + self.remove = to_streamed_response_wrapper( + tags.remove, + ) + + +class AsyncTagsResourceWithStreamingResponse: + def __init__(self, tags: AsyncTagsResource) -> None: + self._tags = tags + + self.add = async_to_streamed_response_wrapper( + tags.add, + ) + self.remove = async_to_streamed_response_wrapper( + tags.remove, + ) diff --git a/src/gradientai/resources/gpu_droplets/floating_ips/__init__.py b/src/gradientai/resources/gpu_droplets/floating_ips/__init__.py new file mode 100644 index 00000000..bf6871b1 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/floating_ips/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .actions import ( + ActionsResource, + AsyncActionsResource, + ActionsResourceWithRawResponse, + AsyncActionsResourceWithRawResponse, + ActionsResourceWithStreamingResponse, + AsyncActionsResourceWithStreamingResponse, +) +from .floating_ips import ( + FloatingIPsResource, + AsyncFloatingIPsResource, + FloatingIPsResourceWithRawResponse, + AsyncFloatingIPsResourceWithRawResponse, + FloatingIPsResourceWithStreamingResponse, + AsyncFloatingIPsResourceWithStreamingResponse, +) + +__all__ = [ + "ActionsResource", + "AsyncActionsResource", + "ActionsResourceWithRawResponse", + "AsyncActionsResourceWithRawResponse", + "ActionsResourceWithStreamingResponse", + "AsyncActionsResourceWithStreamingResponse", + "FloatingIPsResource", + "AsyncFloatingIPsResource", + "FloatingIPsResourceWithRawResponse", + "AsyncFloatingIPsResourceWithRawResponse", + "FloatingIPsResourceWithStreamingResponse", + "AsyncFloatingIPsResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/gpu_droplets/floating_ips/actions.py b/src/gradientai/resources/gpu_droplets/floating_ips/actions.py new file mode 100644 index 00000000..7ba3899d --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/floating_ips/actions.py @@ -0,0 +1,489 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, overload + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import required_args, maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets.floating_ips import action_create_params +from ....types.gpu_droplets.floating_ips.action_list_response import ActionListResponse +from ....types.gpu_droplets.floating_ips.action_create_response import ActionCreateResponse +from ....types.gpu_droplets.floating_ips.action_retrieve_response import ActionRetrieveResponse + +__all__ = ["ActionsResource", "AsyncActionsResource"] + + +class ActionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ActionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ActionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ActionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ActionsResourceWithStreamingResponse(self) + + @overload + def create( + self, + floating_ip: str, + *, + type: Literal["assign", "unassign"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionCreateResponse: + """ + To initiate an action on a floating IP send a POST request to + `/v2/floating_ips/$FLOATING_IP/actions`. In the JSON body to the request, set + the `type` attribute to on of the supported action types: + + | Action | Details | + | ---------- | ------------------------------------- | + | `assign` | Assigns a floating IP to a Droplet | + | `unassign` | Unassign a floating IP from a Droplet | + + Args: + type: The type of action to initiate for the floating IP. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def create( + self, + floating_ip: str, + *, + droplet_id: int, + type: Literal["assign", "unassign"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionCreateResponse: + """ + To initiate an action on a floating IP send a POST request to + `/v2/floating_ips/$FLOATING_IP/actions`. 
In the JSON body to the request, set + the `type` attribute to on of the supported action types: + + | Action | Details | + | ---------- | ------------------------------------- | + | `assign` | Assigns a floating IP to a Droplet | + | `unassign` | Unassign a floating IP from a Droplet | + + Args: + droplet_id: The ID of the Droplet that the floating IP will be assigned to. + + type: The type of action to initiate for the floating IP. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["type"], ["droplet_id", "type"]) + def create( + self, + floating_ip: str, + *, + type: Literal["assign", "unassign"], + droplet_id: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionCreateResponse: + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + return self._post( + f"/v2/floating_ips/{floating_ip}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions", + body=maybe_transform( + { + "type": type, + "droplet_id": droplet_id, + }, + action_create_params.ActionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionCreateResponse, + ) + + def retrieve( + self, + action_id: int, + *, + floating_ip: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionRetrieveResponse: + """ + To retrieve the status of a floating IP action, send a GET request to + `/v2/floating_ips/$FLOATING_IP/actions/$ACTION_ID`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + return self._get( + f"/v2/floating_ips/{floating_ip}/actions/{action_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions/{action_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionRetrieveResponse, + ) + + def list( + self, + floating_ip: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionListResponse: + """ + To retrieve all actions that have been executed on a floating IP, send a GET + request to `/v2/floating_ips/$FLOATING_IP/actions`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + return self._get( + f"/v2/floating_ips/{floating_ip}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionListResponse, + ) + + +class AsyncActionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncActionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncActionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncActionsResourceWithStreamingResponse(self) + + @overload + async def create( + self, + floating_ip: str, + *, + type: Literal["assign", "unassign"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionCreateResponse: + """ + To initiate an action on a floating IP send a POST request to + `/v2/floating_ips/$FLOATING_IP/actions`. In the JSON body to the request, set + the `type` attribute to on of the supported action types: + + | Action | Details | + | ---------- | ------------------------------------- | + | `assign` | Assigns a floating IP to a Droplet | + | `unassign` | Unassign a floating IP from a Droplet | + + Args: + type: The type of action to initiate for the floating IP. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + floating_ip: str, + *, + droplet_id: int, + type: Literal["assign", "unassign"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionCreateResponse: + """ + To initiate an action on a floating IP send a POST request to + `/v2/floating_ips/$FLOATING_IP/actions`. In the JSON body to the request, set + the `type` attribute to on of the supported action types: + + | Action | Details | + | ---------- | ------------------------------------- | + | `assign` | Assigns a floating IP to a Droplet | + | `unassign` | Unassign a floating IP from a Droplet | + + Args: + droplet_id: The ID of the Droplet that the floating IP will be assigned to. 
+ + type: The type of action to initiate for the floating IP. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["type"], ["droplet_id", "type"]) + async def create( + self, + floating_ip: str, + *, + type: Literal["assign", "unassign"], + droplet_id: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionCreateResponse: + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + return await self._post( + f"/v2/floating_ips/{floating_ip}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions", + body=await async_maybe_transform( + { + "type": type, + "droplet_id": droplet_id, + }, + action_create_params.ActionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionCreateResponse, + ) + + async def retrieve( + self, + action_id: int, + *, + floating_ip: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionRetrieveResponse: + """ + To retrieve the status of a floating IP action, send a GET request to + `/v2/floating_ips/$FLOATING_IP/actions/$ACTION_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + return await self._get( + f"/v2/floating_ips/{floating_ip}/actions/{action_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions/{action_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionRetrieveResponse, + ) + + async def list( + self, + floating_ip: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionListResponse: + """ + To retrieve all actions that have been executed on a floating IP, send a GET + request to `/v2/floating_ips/$FLOATING_IP/actions`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + return await self._get( + f"/v2/floating_ips/{floating_ip}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}/actions", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionListResponse, + ) + + +class ActionsResourceWithRawResponse: + def __init__(self, actions: ActionsResource) -> None: + self._actions = actions + + self.create = to_raw_response_wrapper( + actions.create, + ) + self.retrieve = to_raw_response_wrapper( + actions.retrieve, + ) + self.list = to_raw_response_wrapper( + actions.list, + ) + + +class AsyncActionsResourceWithRawResponse: + def __init__(self, actions: AsyncActionsResource) -> None: + self._actions = actions + + self.create = async_to_raw_response_wrapper( + actions.create, + ) + self.retrieve = async_to_raw_response_wrapper( + actions.retrieve, + ) + self.list = async_to_raw_response_wrapper( + actions.list, + ) + + +class ActionsResourceWithStreamingResponse: + def __init__(self, actions: ActionsResource) -> None: + self._actions = actions + + self.create = to_streamed_response_wrapper( + actions.create, + ) + self.retrieve = to_streamed_response_wrapper( + actions.retrieve, + ) + self.list = to_streamed_response_wrapper( + actions.list, + ) + + +class AsyncActionsResourceWithStreamingResponse: + def __init__(self, actions: AsyncActionsResource) -> None: + self._actions = actions + + self.create = async_to_streamed_response_wrapper( + actions.create, + ) + self.retrieve = 
async_to_streamed_response_wrapper( + actions.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + actions.list, + ) diff --git a/src/gradientai/resources/gpu_droplets/floating_ips/floating_ips.py b/src/gradientai/resources/gpu_droplets/floating_ips/floating_ips.py new file mode 100644 index 00000000..cabe012e --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/floating_ips/floating_ips.py @@ -0,0 +1,635 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import overload + +import httpx + +from .actions import ( + ActionsResource, + AsyncActionsResource, + ActionsResourceWithRawResponse, + AsyncActionsResourceWithRawResponse, + ActionsResourceWithStreamingResponse, + AsyncActionsResourceWithStreamingResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import required_args, maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets import floating_ip_list_params, floating_ip_create_params +from ....types.gpu_droplets.floating_ip_list_response import FloatingIPListResponse +from ....types.gpu_droplets.floating_ip_create_response import FloatingIPCreateResponse +from ....types.gpu_droplets.floating_ip_retrieve_response import FloatingIPRetrieveResponse + +__all__ = ["FloatingIPsResource", "AsyncFloatingIPsResource"] + + +class FloatingIPsResource(SyncAPIResource): + @cached_property + def actions(self) -> ActionsResource: + return ActionsResource(self._client) + + @cached_property + def with_raw_response(self) -> FloatingIPsResourceWithRawResponse: + """ + This property can be 
used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return FloatingIPsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> FloatingIPsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return FloatingIPsResourceWithStreamingResponse(self) + + @overload + def create( + self, + *, + droplet_id: int, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPCreateResponse: + """ + On creation, a floating IP must be either assigned to a Droplet or reserved to a + region. + + - To create a new floating IP assigned to a Droplet, send a POST request to + `/v2/floating_ips` with the `droplet_id` attribute. + + - To create a new floating IP reserved to a region, send a POST request to + `/v2/floating_ips` with the `region` attribute. + + **Note**: In addition to the standard rate limiting, only 12 floating IPs may be + created per 60 seconds. + + Args: + droplet_id: The ID of the Droplet that the floating IP will be assigned to. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + def create( + self, + *, + region: str, + project_id: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPCreateResponse: + """ + On creation, a floating IP must be either assigned to a Droplet or reserved to a + region. + + - To create a new floating IP assigned to a Droplet, send a POST request to + `/v2/floating_ips` with the `droplet_id` attribute. + + - To create a new floating IP reserved to a region, send a POST request to + `/v2/floating_ips` with the `region` attribute. + + **Note**: In addition to the standard rate limiting, only 12 floating IPs may be + created per 60 seconds. + + Args: + region: The slug identifier for the region the floating IP will be reserved to. + + project_id: The UUID of the project to which the floating IP will be assigned. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["droplet_id"], ["region"]) + def create( + self, + *, + droplet_id: int | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPCreateResponse: + return self._post( + "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips", + body=maybe_transform( + { + "droplet_id": droplet_id, + "region": region, + "project_id": project_id, + }, + floating_ip_create_params.FloatingIPCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FloatingIPCreateResponse, + ) + + def retrieve( + self, + floating_ip: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPRetrieveResponse: + """ + To show information about a floating IP, send a GET request to + `/v2/floating_ips/$FLOATING_IP_ADDR`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + return self._get( + f"/v2/floating_ips/{floating_ip}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FloatingIPRetrieveResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPListResponse: + """ + To list all of the floating IPs available on your account, send a GET request to + `/v2/floating_ips`. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + floating_ip_list_params.FloatingIPListParams, + ), + ), + cast_to=FloatingIPListResponse, + ) + + def delete( + self, + floating_ip: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a floating IP and remove it from your account, send a DELETE request + to `/v2/floating_ips/$FLOATING_IP_ADDR`. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/floating_ips/{floating_ip}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncFloatingIPsResource(AsyncAPIResource): + @cached_property + def actions(self) -> AsyncActionsResource: + return AsyncActionsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncFloatingIPsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncFloatingIPsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncFloatingIPsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncFloatingIPsResourceWithStreamingResponse(self) + + @overload + async def create( + self, + *, + droplet_id: int, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPCreateResponse: + """ + On creation, a floating IP must be either assigned to a Droplet or reserved to a + region. + + - To create a new floating IP assigned to a Droplet, send a POST request to + `/v2/floating_ips` with the `droplet_id` attribute. + + - To create a new floating IP reserved to a region, send a POST request to + `/v2/floating_ips` with the `region` attribute. + + **Note**: In addition to the standard rate limiting, only 12 floating IPs may be + created per 60 seconds. + + Args: + droplet_id: The ID of the Droplet that the floating IP will be assigned to. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + *, + region: str, + project_id: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPCreateResponse: + """ + On creation, a floating IP must be either assigned to a Droplet or reserved to a + region. + + - To create a new floating IP assigned to a Droplet, send a POST request to + `/v2/floating_ips` with the `droplet_id` attribute. 
+ + - To create a new floating IP reserved to a region, send a POST request to + `/v2/floating_ips` with the `region` attribute. + + **Note**: In addition to the standard rate limiting, only 12 floating IPs may be + created per 60 seconds. + + Args: + region: The slug identifier for the region the floating IP will be reserved to. + + project_id: The UUID of the project to which the floating IP will be assigned. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["droplet_id"], ["region"]) + async def create( + self, + *, + droplet_id: int | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPCreateResponse: + return await self._post( + "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips", + body=await async_maybe_transform( + { + "droplet_id": droplet_id, + "region": region, + "project_id": project_id, + }, + floating_ip_create_params.FloatingIPCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FloatingIPCreateResponse, + ) + + async def retrieve( + self, + floating_ip: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPRetrieveResponse: + """ + To show information about a floating IP, send a GET request to + `/v2/floating_ips/$FLOATING_IP_ADDR`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + return await self._get( + f"/v2/floating_ips/{floating_ip}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=FloatingIPRetrieveResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> FloatingIPListResponse: + """ + To list all of the floating IPs available on your account, send a GET request to + `/v2/floating_ips`. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/floating_ips" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/floating_ips", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + floating_ip_list_params.FloatingIPListParams, + ), + ), + cast_to=FloatingIPListResponse, + ) + + async def delete( + self, + floating_ip: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a floating IP and remove it from your account, send a DELETE request + to `/v2/floating_ips/$FLOATING_IP_ADDR`. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not floating_ip: + raise ValueError(f"Expected a non-empty value for `floating_ip` but received {floating_ip!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/floating_ips/{floating_ip}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/floating_ips/{floating_ip}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class FloatingIPsResourceWithRawResponse: + def __init__(self, floating_ips: FloatingIPsResource) -> None: + self._floating_ips = floating_ips + + self.create = to_raw_response_wrapper( + floating_ips.create, + ) + self.retrieve = to_raw_response_wrapper( + floating_ips.retrieve, + ) + self.list = to_raw_response_wrapper( + floating_ips.list, + ) + self.delete = to_raw_response_wrapper( + floating_ips.delete, + ) + + @cached_property + def actions(self) -> ActionsResourceWithRawResponse: + return ActionsResourceWithRawResponse(self._floating_ips.actions) + + +class AsyncFloatingIPsResourceWithRawResponse: + def __init__(self, floating_ips: AsyncFloatingIPsResource) -> None: + self._floating_ips = floating_ips + + self.create = async_to_raw_response_wrapper( + floating_ips.create, + ) + self.retrieve = async_to_raw_response_wrapper( + floating_ips.retrieve, + ) + self.list = async_to_raw_response_wrapper( + floating_ips.list, + ) + self.delete = async_to_raw_response_wrapper( + floating_ips.delete, + ) + + @cached_property + def actions(self) -> AsyncActionsResourceWithRawResponse: + return AsyncActionsResourceWithRawResponse(self._floating_ips.actions) + + +class 
FloatingIPsResourceWithStreamingResponse: + def __init__(self, floating_ips: FloatingIPsResource) -> None: + self._floating_ips = floating_ips + + self.create = to_streamed_response_wrapper( + floating_ips.create, + ) + self.retrieve = to_streamed_response_wrapper( + floating_ips.retrieve, + ) + self.list = to_streamed_response_wrapper( + floating_ips.list, + ) + self.delete = to_streamed_response_wrapper( + floating_ips.delete, + ) + + @cached_property + def actions(self) -> ActionsResourceWithStreamingResponse: + return ActionsResourceWithStreamingResponse(self._floating_ips.actions) + + +class AsyncFloatingIPsResourceWithStreamingResponse: + def __init__(self, floating_ips: AsyncFloatingIPsResource) -> None: + self._floating_ips = floating_ips + + self.create = async_to_streamed_response_wrapper( + floating_ips.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + floating_ips.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + floating_ips.list, + ) + self.delete = async_to_streamed_response_wrapper( + floating_ips.delete, + ) + + @cached_property + def actions(self) -> AsyncActionsResourceWithStreamingResponse: + return AsyncActionsResourceWithStreamingResponse(self._floating_ips.actions) diff --git a/src/gradientai/resources/gpu_droplets/gpu_droplets.py b/src/gradientai/resources/gpu_droplets/gpu_droplets.py new file mode 100644 index 00000000..cbb07830 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/gpu_droplets.py @@ -0,0 +1,2008 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Any, List, Union, Optional, cast +from typing_extensions import Literal, overload + +import httpx + +from .sizes import ( + SizesResource, + AsyncSizesResource, + SizesResourceWithRawResponse, + AsyncSizesResourceWithRawResponse, + SizesResourceWithStreamingResponse, + AsyncSizesResourceWithStreamingResponse, +) +from ...types import ( + gpu_droplet_list_params, + gpu_droplet_create_params, + gpu_droplet_list_kernels_params, + gpu_droplet_delete_by_tag_params, + gpu_droplet_list_firewalls_params, + gpu_droplet_list_snapshots_params, +) +from .actions import ( + ActionsResource, + AsyncActionsResource, + ActionsResourceWithRawResponse, + AsyncActionsResourceWithRawResponse, + ActionsResourceWithStreamingResponse, + AsyncActionsResourceWithStreamingResponse, +) +from .backups import ( + BackupsResource, + AsyncBackupsResource, + BackupsResourceWithRawResponse, + AsyncBackupsResourceWithRawResponse, + BackupsResourceWithStreamingResponse, + AsyncBackupsResourceWithStreamingResponse, +) +from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ..._utils import required_args, maybe_transform, async_maybe_transform +from ..._compat import cached_property +from .autoscale import ( + AutoscaleResource, + AsyncAutoscaleResource, + AutoscaleResourceWithRawResponse, + AsyncAutoscaleResourceWithRawResponse, + AutoscaleResourceWithStreamingResponse, + AsyncAutoscaleResourceWithStreamingResponse, +) +from .snapshots import ( + SnapshotsResource, + AsyncSnapshotsResource, + SnapshotsResourceWithRawResponse, + AsyncSnapshotsResourceWithRawResponse, + SnapshotsResourceWithStreamingResponse, + AsyncSnapshotsResourceWithStreamingResponse, +) +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from .images.images import ( + 
ImagesResource, + AsyncImagesResource, + ImagesResourceWithRawResponse, + AsyncImagesResourceWithRawResponse, + ImagesResourceWithStreamingResponse, + AsyncImagesResourceWithStreamingResponse, +) +from ..._base_client import make_request_options +from .account.account import ( + AccountResource, + AsyncAccountResource, + AccountResourceWithRawResponse, + AsyncAccountResourceWithRawResponse, + AccountResourceWithStreamingResponse, + AsyncAccountResourceWithStreamingResponse, +) +from .volumes.volumes import ( + VolumesResource, + AsyncVolumesResource, + VolumesResourceWithRawResponse, + AsyncVolumesResourceWithRawResponse, + VolumesResourceWithStreamingResponse, + AsyncVolumesResourceWithStreamingResponse, +) +from .firewalls.firewalls import ( + FirewallsResource, + AsyncFirewallsResource, + FirewallsResourceWithRawResponse, + AsyncFirewallsResourceWithRawResponse, + FirewallsResourceWithStreamingResponse, + AsyncFirewallsResourceWithStreamingResponse, +) +from .floating_ips.floating_ips import ( + FloatingIPsResource, + AsyncFloatingIPsResource, + FloatingIPsResourceWithRawResponse, + AsyncFloatingIPsResourceWithRawResponse, + FloatingIPsResourceWithStreamingResponse, + AsyncFloatingIPsResourceWithStreamingResponse, +) +from .load_balancers.load_balancers import ( + LoadBalancersResource, + AsyncLoadBalancersResource, + LoadBalancersResourceWithRawResponse, + AsyncLoadBalancersResourceWithRawResponse, + LoadBalancersResourceWithStreamingResponse, + AsyncLoadBalancersResourceWithStreamingResponse, +) +from ...types.gpu_droplet_list_response import GPUDropletListResponse +from .destroy_with_associated_resources import ( + DestroyWithAssociatedResourcesResource, + AsyncDestroyWithAssociatedResourcesResource, + DestroyWithAssociatedResourcesResourceWithRawResponse, + AsyncDestroyWithAssociatedResourcesResourceWithRawResponse, + DestroyWithAssociatedResourcesResourceWithStreamingResponse, + AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse, +) +from 
...types.droplet_backup_policy_param import DropletBackupPolicyParam +from ...types.gpu_droplet_create_response import GPUDropletCreateResponse +from ...types.gpu_droplet_retrieve_response import GPUDropletRetrieveResponse +from ...types.gpu_droplet_list_kernels_response import GPUDropletListKernelsResponse +from ...types.gpu_droplet_list_firewalls_response import GPUDropletListFirewallsResponse +from ...types.gpu_droplet_list_neighbors_response import GPUDropletListNeighborsResponse +from ...types.gpu_droplet_list_snapshots_response import GPUDropletListSnapshotsResponse + +__all__ = ["GPUDropletsResource", "AsyncGPUDropletsResource"] + + +class GPUDropletsResource(SyncAPIResource): + @cached_property + def backups(self) -> BackupsResource: + return BackupsResource(self._client) + + @cached_property + def actions(self) -> ActionsResource: + return ActionsResource(self._client) + + @cached_property + def destroy_with_associated_resources(self) -> DestroyWithAssociatedResourcesResource: + return DestroyWithAssociatedResourcesResource(self._client) + + @cached_property + def autoscale(self) -> AutoscaleResource: + return AutoscaleResource(self._client) + + @cached_property + def firewalls(self) -> FirewallsResource: + return FirewallsResource(self._client) + + @cached_property + def floating_ips(self) -> FloatingIPsResource: + return FloatingIPsResource(self._client) + + @cached_property + def images(self) -> ImagesResource: + return ImagesResource(self._client) + + @cached_property + def load_balancers(self) -> LoadBalancersResource: + return LoadBalancersResource(self._client) + + @cached_property + def sizes(self) -> SizesResource: + return SizesResource(self._client) + + @cached_property + def snapshots(self) -> SnapshotsResource: + return SnapshotsResource(self._client) + + @cached_property + def volumes(self) -> VolumesResource: + return VolumesResource(self._client) + + @cached_property + def account(self) -> AccountResource: + return 
AccountResource(self._client) + + @cached_property + def with_raw_response(self) -> GPUDropletsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return GPUDropletsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> GPUDropletsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return GPUDropletsResourceWithStreamingResponse(self) + + @overload + def create( + self, + *, + image: Union[str, int], + name: str, + size: str, + backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + backups: bool | NotGiven = NOT_GIVEN, + ipv6: bool | NotGiven = NOT_GIVEN, + monitoring: bool | NotGiven = NOT_GIVEN, + private_networking: bool | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + user_data: str | NotGiven = NOT_GIVEN, + volumes: List[str] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + with_droplet_agent: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletCreateResponse: + """ + To create a new Droplet, send a POST request to `/v2/droplets` setting the + required attributes. 
+ + A Droplet will be created using the provided information. The response body will + contain a JSON object with a key called `droplet`. The value will be an object + containing the standard attributes for your new Droplet. The response code, 202 + Accepted, does not indicate the success or failure of the operation, just that + the request has been accepted for processing. The `actions` returned as part of + the response's `links` object can be used to check the status of the Droplet + create event. + + ### Create Multiple Droplets + + Creating multiple Droplets is very similar to creating a single Droplet. Instead + of sending `name` as a string, send `names` as an array of strings. A Droplet + will be created for each name you send using the associated information. Up to + ten Droplets may be created this way at a time. + + Rather than returning a single Droplet, the response body will contain a JSON + array with a key called `droplets`. This will be set to an array of JSON + objects, each of which will contain the standard Droplet attributes. The + response code, 202 Accepted, does not indicate the success or failure of any + operation, just that the request has been accepted for processing. The array of + `actions` returned as part of the response's `links` object can be used to check + the status of each individual Droplet create event. + + Args: + image: The image ID of a public or private image or the slug identifier for a public + image. This image will be the base image for your Droplet. Requires `image:read` + scope. + + name: The human-readable string you wish to use when displaying the Droplet name. The + name, if set to a domain name managed in the DigitalOcean DNS management system, + will configure a PTR record for the Droplet. The name set during creation will + also determine the hostname for the Droplet in its internal configuration. + + size: The slug identifier for the size that you wish to select for this Droplet. 
+ + backup_policy: An object specifying the backup policy for the Droplet. If omitted and `backups` + is `true`, the backup plan will default to daily. + + backups: A boolean indicating whether automated backups should be enabled for the + Droplet. + + ipv6: A boolean indicating whether to enable IPv6 on the Droplet. + + monitoring: A boolean indicating whether to install the DigitalOcean agent for monitoring. + + private_networking: This parameter has been deprecated. Use `vpc_uuid` instead to specify a VPC + network for the Droplet. If no `vpc_uuid` is provided, the Droplet will be + placed in your account's default VPC for the region. + + region: The slug identifier for the region that you wish to deploy the Droplet in. If + the specific datacenter is not not important, a slug prefix (e.g. `nyc`) can be + used to deploy the Droplet in any of the that region's locations (`nyc1`, + `nyc2`, or `nyc3`). If the region is omitted from the create request completely, + the Droplet may deploy in any region. + + ssh_keys: An array containing the IDs or fingerprints of the SSH keys that you wish to + embed in the Droplet's root account upon creation. You must add the keys to your + team before they can be embedded on a Droplet. Requires `ssh_key:read` scope. + + tags: A flat array of tag names as strings to apply to the Droplet after it is + created. Tag names can either be existing or new tags. Requires `tag:create` + scope. + + user_data: A string containing 'user data' which may be used to configure the Droplet on + first boot, often a 'cloud-config' file or Bash script. It must be plain text + and may not exceed 64 KiB in size. + + volumes: An array of IDs for block storage volumes that will be attached to the Droplet + once created. The volumes must not already be attached to an existing Droplet. + Requires `block_storage:read` scpoe. + + vpc_uuid: A string specifying the UUID of the VPC to which the Droplet will be assigned. 
+ If excluded, the Droplet will be assigned to your account's default VPC for the + region. Requires `vpc:read` scope. + + with_droplet_agent: A boolean indicating whether to install the DigitalOcean agent used for + providing access to the Droplet web console in the control panel. By default, + the agent is installed on new Droplets but installation errors (i.e. OS not + supported) are ignored. To prevent it from being installed, set to `false`. To + make installation errors fatal, explicitly set it to `true`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def create( + self, + *, + image: Union[str, int], + names: List[str], + size: str, + backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + backups: bool | NotGiven = NOT_GIVEN, + ipv6: bool | NotGiven = NOT_GIVEN, + monitoring: bool | NotGiven = NOT_GIVEN, + private_networking: bool | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + user_data: str | NotGiven = NOT_GIVEN, + volumes: List[str] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + with_droplet_agent: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletCreateResponse: + """ + To create a new Droplet, send a POST request to `/v2/droplets` setting the + required attributes. 
+ + A Droplet will be created using the provided information. The response body will + contain a JSON object with a key called `droplet`. The value will be an object + containing the standard attributes for your new Droplet. The response code, 202 + Accepted, does not indicate the success or failure of the operation, just that + the request has been accepted for processing. The `actions` returned as part of + the response's `links` object can be used to check the status of the Droplet + create event. + + ### Create Multiple Droplets + + Creating multiple Droplets is very similar to creating a single Droplet. Instead + of sending `name` as a string, send `names` as an array of strings. A Droplet + will be created for each name you send using the associated information. Up to + ten Droplets may be created this way at a time. + + Rather than returning a single Droplet, the response body will contain a JSON + array with a key called `droplets`. This will be set to an array of JSON + objects, each of which will contain the standard Droplet attributes. The + response code, 202 Accepted, does not indicate the success or failure of any + operation, just that the request has been accepted for processing. The array of + `actions` returned as part of the response's `links` object can be used to check + the status of each individual Droplet create event. + + Args: + image: The image ID of a public or private image or the slug identifier for a public + image. This image will be the base image for your Droplet. Requires `image:read` + scope. + + names: An array of human human-readable strings you wish to use when displaying the + Droplet name. Each name, if set to a domain name managed in the DigitalOcean DNS + management system, will configure a PTR record for the Droplet. Each name set + during creation will also determine the hostname for the Droplet in its internal + configuration. + + size: The slug identifier for the size that you wish to select for this Droplet. 
+ + backup_policy: An object specifying the backup policy for the Droplet. If omitted and `backups` + is `true`, the backup plan will default to daily. + + backups: A boolean indicating whether automated backups should be enabled for the + Droplet. + + ipv6: A boolean indicating whether to enable IPv6 on the Droplet. + + monitoring: A boolean indicating whether to install the DigitalOcean agent for monitoring. + + private_networking: This parameter has been deprecated. Use `vpc_uuid` instead to specify a VPC + network for the Droplet. If no `vpc_uuid` is provided, the Droplet will be + placed in your account's default VPC for the region. + + region: The slug identifier for the region that you wish to deploy the Droplet in. If + the specific datacenter is not not important, a slug prefix (e.g. `nyc`) can be + used to deploy the Droplet in any of the that region's locations (`nyc1`, + `nyc2`, or `nyc3`). If the region is omitted from the create request completely, + the Droplet may deploy in any region. + + ssh_keys: An array containing the IDs or fingerprints of the SSH keys that you wish to + embed in the Droplet's root account upon creation. You must add the keys to your + team before they can be embedded on a Droplet. Requires `ssh_key:read` scope. + + tags: A flat array of tag names as strings to apply to the Droplet after it is + created. Tag names can either be existing or new tags. Requires `tag:create` + scope. + + user_data: A string containing 'user data' which may be used to configure the Droplet on + first boot, often a 'cloud-config' file or Bash script. It must be plain text + and may not exceed 64 KiB in size. + + volumes: An array of IDs for block storage volumes that will be attached to the Droplet + once created. The volumes must not already be attached to an existing Droplet. + Requires `block_storage:read` scpoe. + + vpc_uuid: A string specifying the UUID of the VPC to which the Droplet will be assigned. 
+ If excluded, the Droplet will be assigned to your account's default VPC for the + region. Requires `vpc:read` scope. + + with_droplet_agent: A boolean indicating whether to install the DigitalOcean agent used for + providing access to the Droplet web console in the control panel. By default, + the agent is installed on new Droplets but installation errors (i.e. OS not + supported) are ignored. To prevent it from being installed, set to `false`. To + make installation errors fatal, explicitly set it to `true`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["image", "name", "size"], ["image", "names", "size"]) + def create( + self, + *, + image: Union[str, int], + name: str | NotGiven = NOT_GIVEN, + size: str, + backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + backups: bool | NotGiven = NOT_GIVEN, + ipv6: bool | NotGiven = NOT_GIVEN, + monitoring: bool | NotGiven = NOT_GIVEN, + private_networking: bool | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + user_data: str | NotGiven = NOT_GIVEN, + volumes: List[str] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + with_droplet_agent: bool | NotGiven = NOT_GIVEN, + names: List[str] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletCreateResponse: + return cast( + GPUDropletCreateResponse, + self._post( + "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets", + body=maybe_transform( + { + "image": image, + "name": name, + "size": size, + "backup_policy": backup_policy, + "backups": backups, + "ipv6": ipv6, + "monitoring": monitoring, + "private_networking": private_networking, + "region": region, + "ssh_keys": ssh_keys, + "tags": tags, + "user_data": user_data, + "volumes": volumes, + "vpc_uuid": vpc_uuid, + "with_droplet_agent": with_droplet_agent, + "names": names, + }, + gpu_droplet_create_params.GPUDropletCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast( + Any, GPUDropletCreateResponse + ), # Union types cannot be passed in as arguments in the type system + ), + ) + + def retrieve( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletRetrieveResponse: + """ + To show information about an individual Droplet, send a GET request to + `/v2/droplets/$DROPLET_ID`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/droplets/{droplet_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=GPUDropletRetrieveResponse, + ) + + def list( + self, + *, + name: str | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + tag_name: str | NotGiven = NOT_GIVEN, + type: Literal["droplets", "gpus"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletListResponse: + """ + To list all Droplets in your account, send a GET request to `/v2/droplets`. + + The response body will be a JSON object with a key of `droplets`. This will be + set to an array containing objects each representing a Droplet. These will + contain the standard Droplet attributes. + + ### Filtering Results by Tag + + It's possible to request filtered results by including certain query parameters. + To only list Droplets assigned to a specific tag, include the `tag_name` query + parameter set to the name of the tag in your GET request. For example, + `/v2/droplets?tag_name=$TAG_NAME`. + + ### GPU Droplets + + By default, only non-GPU Droplets are returned. 
To list only GPU Droplets, set + the `type` query parameter to `gpus`. For example, `/v2/droplets?type=gpus`. + + Args: + name: Used to filter list response by Droplet name returning only exact matches. It is + case-insensitive and can not be combined with `tag_name`. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or + `type`. Requires `tag:read` scope. + + type: When `type` is set to `gpus`, only GPU Droplets will be returned. By default, + only non-GPU Droplets are returned. Can not be combined with `tag_name`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "name": name, + "page": page, + "per_page": per_page, + "tag_name": tag_name, + "type": type, + }, + gpu_droplet_list_params.GPUDropletListParams, + ), + ), + cast_to=GPUDropletListResponse, + ) + + def delete( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a Droplet, send a DELETE request to `/v2/droplets/$DROPLET_ID`. 
+ + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/droplets/{droplet_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def delete_by_tag( + self, + *, + tag_name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete **all** Droplets assigned to a specific tag, include the `tag_name` + query parameter set to the name of the tag in your DELETE request. For example, + `/v2/droplets?tag_name=$TAG_NAME`. + + This endpoint requires `tag:read` scope. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. + + Args: + tag_name: Specifies Droplets to be deleted by tag. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + {"tag_name": tag_name}, gpu_droplet_delete_by_tag_params.GPUDropletDeleteByTagParams + ), + ), + cast_to=NoneType, + ) + + def list_firewalls( + self, + droplet_id: int, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletListFirewallsResponse: + """ + To retrieve a list of all firewalls available to a Droplet, send a GET request + to `/v2/droplets/$DROPLET_ID/firewalls` + + The response will be a JSON object that has a key called `firewalls`. This will + be set to an array of `firewall` objects, each of which contain the standard + `firewall` attributes. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/droplets/{droplet_id}/firewalls" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/firewalls", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + gpu_droplet_list_firewalls_params.GPUDropletListFirewallsParams, + ), + ), + cast_to=GPUDropletListFirewallsResponse, + ) + + def list_kernels( + self, + droplet_id: int, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletListKernelsResponse: + """ + To retrieve a list of all kernels available to a Droplet, send a GET request to + `/v2/droplets/$DROPLET_ID/kernels` + + The response will be a JSON object that has a key called `kernels`. This will be + set to an array of `kernel` objects, each of which contain the standard `kernel` + attributes. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/droplets/{droplet_id}/kernels" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/kernels", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + gpu_droplet_list_kernels_params.GPUDropletListKernelsParams, + ), + ), + cast_to=GPUDropletListKernelsResponse, + ) + + def list_neighbors( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletListNeighborsResponse: + """To retrieve a list of any "neighbors" (i.e. + + Droplets that are co-located on the + same physical hardware) for a specific Droplet, send a GET request to + `/v2/droplets/$DROPLET_ID/neighbors`. + + The results will be returned as a JSON object with a key of `droplets`. This + will be set to an array containing objects representing any other Droplets that + share the same physical hardware. An empty array indicates that the Droplet is + not co-located any other Droplets associated with your account. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/droplets/{droplet_id}/neighbors" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/neighbors", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=GPUDropletListNeighborsResponse, + ) + + def list_snapshots( + self, + droplet_id: int, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletListSnapshotsResponse: + """ + To retrieve the snapshots that have been created from a Droplet, send a GET + request to `/v2/droplets/$DROPLET_ID/snapshots`. + + You will get back a JSON object that has a `snapshots` key. This will be set to + an array of snapshot objects, each of which contain the standard Droplet + snapshot attributes. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/droplets/{droplet_id}/snapshots" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/snapshots", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + gpu_droplet_list_snapshots_params.GPUDropletListSnapshotsParams, + ), + ), + cast_to=GPUDropletListSnapshotsResponse, + ) + + +class AsyncGPUDropletsResource(AsyncAPIResource): + @cached_property + def backups(self) -> AsyncBackupsResource: + return AsyncBackupsResource(self._client) + + @cached_property + def actions(self) -> AsyncActionsResource: + return AsyncActionsResource(self._client) + + @cached_property + def destroy_with_associated_resources(self) -> AsyncDestroyWithAssociatedResourcesResource: + return AsyncDestroyWithAssociatedResourcesResource(self._client) + + @cached_property + def autoscale(self) -> AsyncAutoscaleResource: + return AsyncAutoscaleResource(self._client) + + @cached_property + def firewalls(self) -> AsyncFirewallsResource: + return AsyncFirewallsResource(self._client) + + @cached_property + def floating_ips(self) -> AsyncFloatingIPsResource: + return AsyncFloatingIPsResource(self._client) + + @cached_property + def images(self) -> AsyncImagesResource: + return AsyncImagesResource(self._client) + + @cached_property + def load_balancers(self) -> AsyncLoadBalancersResource: + return AsyncLoadBalancersResource(self._client) + + @cached_property + def sizes(self) -> AsyncSizesResource: + return AsyncSizesResource(self._client) + + @cached_property + def 
snapshots(self) -> AsyncSnapshotsResource: + return AsyncSnapshotsResource(self._client) + + @cached_property + def volumes(self) -> AsyncVolumesResource: + return AsyncVolumesResource(self._client) + + @cached_property + def account(self) -> AsyncAccountResource: + return AsyncAccountResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncGPUDropletsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncGPUDropletsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncGPUDropletsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncGPUDropletsResourceWithStreamingResponse(self) + + @overload + async def create( + self, + *, + image: Union[str, int], + name: str, + size: str, + backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN, + backups: bool | NotGiven = NOT_GIVEN, + ipv6: bool | NotGiven = NOT_GIVEN, + monitoring: bool | NotGiven = NOT_GIVEN, + private_networking: bool | NotGiven = NOT_GIVEN, + region: str | NotGiven = NOT_GIVEN, + ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + user_data: str | NotGiven = NOT_GIVEN, + volumes: List[str] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + with_droplet_agent: bool | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletCreateResponse: + """ + To create a new Droplet, send a POST request to `/v2/droplets` setting the + required attributes. + + A Droplet will be created using the provided information. The response body will + contain a JSON object with a key called `droplet`. The value will be an object + containing the standard attributes for your new Droplet. The response code, 202 + Accepted, does not indicate the success or failure of the operation, just that + the request has been accepted for processing. The `actions` returned as part of + the response's `links` object can be used to check the status of the Droplet + create event. + + ### Create Multiple Droplets + + Creating multiple Droplets is very similar to creating a single Droplet. Instead + of sending `name` as a string, send `names` as an array of strings. A Droplet + will be created for each name you send using the associated information. Up to + ten Droplets may be created this way at a time. + + Rather than returning a single Droplet, the response body will contain a JSON + array with a key called `droplets`. This will be set to an array of JSON + objects, each of which will contain the standard Droplet attributes. The + response code, 202 Accepted, does not indicate the success or failure of any + operation, just that the request has been accepted for processing. The array of + `actions` returned as part of the response's `links` object can be used to check + the status of each individual Droplet create event. + + Args: + image: The image ID of a public or private image or the slug identifier for a public + image. This image will be the base image for your Droplet. Requires `image:read` + scope. + + name: The human-readable string you wish to use when displaying the Droplet name. 
The + name, if set to a domain name managed in the DigitalOcean DNS management system, + will configure a PTR record for the Droplet. The name set during creation will + also determine the hostname for the Droplet in its internal configuration. + + size: The slug identifier for the size that you wish to select for this Droplet. + + backup_policy: An object specifying the backup policy for the Droplet. If omitted and `backups` + is `true`, the backup plan will default to daily. + + backups: A boolean indicating whether automated backups should be enabled for the + Droplet. + + ipv6: A boolean indicating whether to enable IPv6 on the Droplet. + + monitoring: A boolean indicating whether to install the DigitalOcean agent for monitoring. + + private_networking: This parameter has been deprecated. Use `vpc_uuid` instead to specify a VPC + network for the Droplet. If no `vpc_uuid` is provided, the Droplet will be + placed in your account's default VPC for the region. + + region: The slug identifier for the region that you wish to deploy the Droplet in. If + the specific datacenter is not not important, a slug prefix (e.g. `nyc`) can be + used to deploy the Droplet in any of the that region's locations (`nyc1`, + `nyc2`, or `nyc3`). If the region is omitted from the create request completely, + the Droplet may deploy in any region. + + ssh_keys: An array containing the IDs or fingerprints of the SSH keys that you wish to + embed in the Droplet's root account upon creation. You must add the keys to your + team before they can be embedded on a Droplet. Requires `ssh_key:read` scope. + + tags: A flat array of tag names as strings to apply to the Droplet after it is + created. Tag names can either be existing or new tags. Requires `tag:create` + scope. + + user_data: A string containing 'user data' which may be used to configure the Droplet on + first boot, often a 'cloud-config' file or Bash script. It must be plain text + and may not exceed 64 KiB in size. 
+
+          volumes: An array of IDs for block storage volumes that will be attached to the Droplet
+              once created. The volumes must not already be attached to an existing Droplet.
+              Requires `block_storage:read` scope.
+
+          vpc_uuid: A string specifying the UUID of the VPC to which the Droplet will be assigned.
+              If excluded, the Droplet will be assigned to your account's default VPC for the
+              region. Requires `vpc:read` scope.
+
+          with_droplet_agent: A boolean indicating whether to install the DigitalOcean agent used for
+              providing access to the Droplet web console in the control panel. By default,
+              the agent is installed on new Droplets but installation errors (i.e. OS not
+              supported) are ignored. To prevent it from being installed, set to `false`. To
+              make installation errors fatal, explicitly set it to `true`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @overload
+    async def create(
+        self,
+        *,
+        image: Union[str, int],
+        names: List[str],
+        size: str,
+        backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN,
+        backups: bool | NotGiven = NOT_GIVEN,
+        ipv6: bool | NotGiven = NOT_GIVEN,
+        monitoring: bool | NotGiven = NOT_GIVEN,
+        private_networking: bool | NotGiven = NOT_GIVEN,
+        region: str | NotGiven = NOT_GIVEN,
+        ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN,
+        tags: Optional[List[str]] | NotGiven = NOT_GIVEN,
+        user_data: str | NotGiven = NOT_GIVEN,
+        volumes: List[str] | NotGiven = NOT_GIVEN,
+        vpc_uuid: str | NotGiven = NOT_GIVEN,
+        with_droplet_agent: bool | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> GPUDropletCreateResponse:
+        """
+        To create a new Droplet, send a POST request to `/v2/droplets` setting the
+        required attributes.
+
+        A Droplet will be created using the provided information. The response body will
+        contain a JSON object with a key called `droplet`. The value will be an object
+        containing the standard attributes for your new Droplet. The response code, 202
+        Accepted, does not indicate the success or failure of the operation, just that
+        the request has been accepted for processing. The `actions` returned as part of
+        the response's `links` object can be used to check the status of the Droplet
+        create event.
+
+        ### Create Multiple Droplets
+
+        Creating multiple Droplets is very similar to creating a single Droplet. Instead
+        of sending `name` as a string, send `names` as an array of strings. A Droplet
+        will be created for each name you send using the associated information. Up to
+        ten Droplets may be created this way at a time.
+
+        Rather than returning a single Droplet, the response body will contain a JSON
+        array with a key called `droplets`. This will be set to an array of JSON
+        objects, each of which will contain the standard Droplet attributes. The
+        response code, 202 Accepted, does not indicate the success or failure of any
+        operation, just that the request has been accepted for processing. The array of
+        `actions` returned as part of the response's `links` object can be used to check
+        the status of each individual Droplet create event.
+
+        Args:
+          image: The image ID of a public or private image or the slug identifier for a public
+              image. This image will be the base image for your Droplet. Requires `image:read`
+              scope.
+
+          names: An array of human-readable strings you wish to use when displaying the
+              Droplet name.
Each name, if set to a domain name managed in the DigitalOcean DNS
+        management system, will configure a PTR record for the Droplet. Each name set
+        during creation will also determine the hostname for the Droplet in its internal
+        configuration.
+
+          size: The slug identifier for the size that you wish to select for this Droplet.
+
+          backup_policy: An object specifying the backup policy for the Droplet. If omitted and `backups`
+              is `true`, the backup plan will default to daily.
+
+          backups: A boolean indicating whether automated backups should be enabled for the
+              Droplet.
+
+          ipv6: A boolean indicating whether to enable IPv6 on the Droplet.
+
+          monitoring: A boolean indicating whether to install the DigitalOcean agent for monitoring.
+
+          private_networking: This parameter has been deprecated. Use `vpc_uuid` instead to specify a VPC
+              network for the Droplet. If no `vpc_uuid` is provided, the Droplet will be
+              placed in your account's default VPC for the region.
+
+          region: The slug identifier for the region that you wish to deploy the Droplet in. If
+              the specific datacenter is not important, a slug prefix (e.g. `nyc`) can be
+              used to deploy the Droplet in any of that region's locations (`nyc1`,
+              `nyc2`, or `nyc3`). If the region is omitted from the create request completely,
+              the Droplet may deploy in any region.
+
+          ssh_keys: An array containing the IDs or fingerprints of the SSH keys that you wish to
+              embed in the Droplet's root account upon creation. You must add the keys to your
+              team before they can be embedded on a Droplet. Requires `ssh_key:read` scope.
+
+          tags: A flat array of tag names as strings to apply to the Droplet after it is
+              created. Tag names can either be existing or new tags. Requires `tag:create`
+              scope.
+
+          user_data: A string containing 'user data' which may be used to configure the Droplet on
+              first boot, often a 'cloud-config' file or Bash script. It must be plain text
+              and may not exceed 64 KiB in size.
+
+          volumes: An array of IDs for block storage volumes that will be attached to the Droplet
+              once created. The volumes must not already be attached to an existing Droplet.
+              Requires `block_storage:read` scope.
+
+          vpc_uuid: A string specifying the UUID of the VPC to which the Droplet will be assigned.
+              If excluded, the Droplet will be assigned to your account's default VPC for the
+              region. Requires `vpc:read` scope.
+
+          with_droplet_agent: A boolean indicating whether to install the DigitalOcean agent used for
+              providing access to the Droplet web console in the control panel. By default,
+              the agent is installed on new Droplets but installation errors (i.e. OS not
+              supported) are ignored. To prevent it from being installed, set to `false`. To
+              make installation errors fatal, explicitly set it to `true`.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
+
+    @required_args(["image", "name", "size"], ["image", "names", "size"])
+    async def create(
+        self,
+        *,
+        image: Union[str, int],
+        name: str | NotGiven = NOT_GIVEN,
+        size: str,
+        backup_policy: DropletBackupPolicyParam | NotGiven = NOT_GIVEN,
+        backups: bool | NotGiven = NOT_GIVEN,
+        ipv6: bool | NotGiven = NOT_GIVEN,
+        monitoring: bool | NotGiven = NOT_GIVEN,
+        private_networking: bool | NotGiven = NOT_GIVEN,
+        region: str | NotGiven = NOT_GIVEN,
+        ssh_keys: List[Union[str, int]] | NotGiven = NOT_GIVEN,
+        tags: Optional[List[str]] | NotGiven = NOT_GIVEN,
+        user_data: str | NotGiven = NOT_GIVEN,
+        volumes: List[str] | NotGiven = NOT_GIVEN,
+        vpc_uuid: str | NotGiven = NOT_GIVEN,
+        with_droplet_agent: bool | NotGiven = NOT_GIVEN,
+        names: List[str] | NotGiven = NOT_GIVEN,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletCreateResponse: + return cast( + GPUDropletCreateResponse, + await self._post( + "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets", + body=await async_maybe_transform( + { + "image": image, + "name": name, + "size": size, + "backup_policy": backup_policy, + "backups": backups, + "ipv6": ipv6, + "monitoring": monitoring, + "private_networking": private_networking, + "region": region, + "ssh_keys": ssh_keys, + "tags": tags, + "user_data": user_data, + "volumes": volumes, + "vpc_uuid": vpc_uuid, + "with_droplet_agent": with_droplet_agent, + "names": names, + }, + gpu_droplet_create_params.GPUDropletCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=cast( + Any, GPUDropletCreateResponse + ), # Union types cannot be passed in as arguments in the type system + ), + ) + + async def retrieve( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletRetrieveResponse: + """ + To show information about an individual Droplet, send a GET request to + `/v2/droplets/$DROPLET_ID`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/droplets/{droplet_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=GPUDropletRetrieveResponse, + ) + + async def list( + self, + *, + name: str | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + tag_name: str | NotGiven = NOT_GIVEN, + type: Literal["droplets", "gpus"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletListResponse: + """ + To list all Droplets in your account, send a GET request to `/v2/droplets`. + + The response body will be a JSON object with a key of `droplets`. This will be + set to an array containing objects each representing a Droplet. These will + contain the standard Droplet attributes. + + ### Filtering Results by Tag + + It's possible to request filtered results by including certain query parameters. + To only list Droplets assigned to a specific tag, include the `tag_name` query + parameter set to the name of the tag in your GET request. For example, + `/v2/droplets?tag_name=$TAG_NAME`. + + ### GPU Droplets + + By default, only non-GPU Droplets are returned. 
To list only GPU Droplets, set + the `type` query parameter to `gpus`. For example, `/v2/droplets?type=gpus`. + + Args: + name: Used to filter list response by Droplet name returning only exact matches. It is + case-insensitive and can not be combined with `tag_name`. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + tag_name: Used to filter Droplets by a specific tag. Can not be combined with `name` or + `type`. Requires `tag:read` scope. + + type: When `type` is set to `gpus`, only GPU Droplets will be returned. By default, + only non-GPU Droplets are returned. Can not be combined with `tag_name`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "name": name, + "page": page, + "per_page": per_page, + "tag_name": tag_name, + "type": type, + }, + gpu_droplet_list_params.GPUDropletListParams, + ), + ), + cast_to=GPUDropletListResponse, + ) + + async def delete( + self, + droplet_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a Droplet, send a DELETE request to `/v2/droplets/$DROPLET_ID`. 
+ + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/droplets/{droplet_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def delete_by_tag( + self, + *, + tag_name: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete **all** Droplets assigned to a specific tag, include the `tag_name` + query parameter set to the name of the tag in your DELETE request. For example, + `/v2/droplets?tag_name=$TAG_NAME`. + + This endpoint requires `tag:read` scope. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. + + Args: + tag_name: Specifies Droplets to be deleted by tag. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + "/v2/droplets" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/droplets", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + {"tag_name": tag_name}, gpu_droplet_delete_by_tag_params.GPUDropletDeleteByTagParams + ), + ), + cast_to=NoneType, + ) + + async def list_firewalls( + self, + droplet_id: int, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletListFirewallsResponse: + """ + To retrieve a list of all firewalls available to a Droplet, send a GET request + to `/v2/droplets/$DROPLET_ID/firewalls` + + The response will be a JSON object that has a key called `firewalls`. This will + be set to an array of `firewall` objects, each of which contain the standard + `firewall` attributes. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/droplets/{droplet_id}/firewalls" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/firewalls", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + gpu_droplet_list_firewalls_params.GPUDropletListFirewallsParams, + ), + ), + cast_to=GPUDropletListFirewallsResponse, + ) + + async def list_kernels( + self, + droplet_id: int, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletListKernelsResponse: + """ + To retrieve a list of all kernels available to a Droplet, send a GET request to + `/v2/droplets/$DROPLET_ID/kernels` + + The response will be a JSON object that has a key called `kernels`. This will be + set to an array of `kernel` objects, each of which contain the standard `kernel` + attributes. + + Args: + page: Which 'page' of paginated results to return. 
+
+          per_page: Number of items returned per page
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            f"/v2/droplets/{droplet_id}/kernels"
+            if self._client._base_url_overridden
+            else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/kernels",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                    },
+                    gpu_droplet_list_kernels_params.GPUDropletListKernelsParams,
+                ),
+            ),
+            cast_to=GPUDropletListKernelsResponse,
+        )
+
+    async def list_neighbors(
+        self,
+        droplet_id: int,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> GPUDropletListNeighborsResponse:
+        """To retrieve a list of any "neighbors" (i.e.
+
+        Droplets that are co-located on the
+        same physical hardware) for a specific Droplet, send a GET request to
+        `/v2/droplets/$DROPLET_ID/neighbors`.
+
+        The results will be returned as a JSON object with a key of `droplets`. This
+        will be set to an array containing objects representing any other Droplets that
+        share the same physical hardware. An empty array indicates that the Droplet is
+        not co-located with any other Droplets associated with your account.
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/droplets/{droplet_id}/neighbors" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/neighbors", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=GPUDropletListNeighborsResponse, + ) + + async def list_snapshots( + self, + droplet_id: int, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> GPUDropletListSnapshotsResponse: + """ + To retrieve the snapshots that have been created from a Droplet, send a GET + request to `/v2/droplets/$DROPLET_ID/snapshots`. + + You will get back a JSON object that has a `snapshots` key. This will be set to + an array of snapshot objects, each of which contain the standard Droplet + snapshot attributes. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/droplets/{droplet_id}/snapshots" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/droplets/{droplet_id}/snapshots", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + gpu_droplet_list_snapshots_params.GPUDropletListSnapshotsParams, + ), + ), + cast_to=GPUDropletListSnapshotsResponse, + ) + + +class GPUDropletsResourceWithRawResponse: + def __init__(self, gpu_droplets: GPUDropletsResource) -> None: + self._gpu_droplets = gpu_droplets + + self.create = to_raw_response_wrapper( + gpu_droplets.create, + ) + self.retrieve = to_raw_response_wrapper( + gpu_droplets.retrieve, + ) + self.list = to_raw_response_wrapper( + gpu_droplets.list, + ) + self.delete = to_raw_response_wrapper( + gpu_droplets.delete, + ) + self.delete_by_tag = to_raw_response_wrapper( + gpu_droplets.delete_by_tag, + ) + self.list_firewalls = to_raw_response_wrapper( + gpu_droplets.list_firewalls, + ) + self.list_kernels = to_raw_response_wrapper( + gpu_droplets.list_kernels, + ) + self.list_neighbors = to_raw_response_wrapper( + gpu_droplets.list_neighbors, + ) + self.list_snapshots = to_raw_response_wrapper( + gpu_droplets.list_snapshots, + ) + + @cached_property + def backups(self) -> BackupsResourceWithRawResponse: + return BackupsResourceWithRawResponse(self._gpu_droplets.backups) + + @cached_property + def actions(self) -> ActionsResourceWithRawResponse: + return ActionsResourceWithRawResponse(self._gpu_droplets.actions) + + @cached_property + def 
destroy_with_associated_resources(self) -> DestroyWithAssociatedResourcesResourceWithRawResponse: + return DestroyWithAssociatedResourcesResourceWithRawResponse( + self._gpu_droplets.destroy_with_associated_resources + ) + + @cached_property + def autoscale(self) -> AutoscaleResourceWithRawResponse: + return AutoscaleResourceWithRawResponse(self._gpu_droplets.autoscale) + + @cached_property + def firewalls(self) -> FirewallsResourceWithRawResponse: + return FirewallsResourceWithRawResponse(self._gpu_droplets.firewalls) + + @cached_property + def floating_ips(self) -> FloatingIPsResourceWithRawResponse: + return FloatingIPsResourceWithRawResponse(self._gpu_droplets.floating_ips) + + @cached_property + def images(self) -> ImagesResourceWithRawResponse: + return ImagesResourceWithRawResponse(self._gpu_droplets.images) + + @cached_property + def load_balancers(self) -> LoadBalancersResourceWithRawResponse: + return LoadBalancersResourceWithRawResponse(self._gpu_droplets.load_balancers) + + @cached_property + def sizes(self) -> SizesResourceWithRawResponse: + return SizesResourceWithRawResponse(self._gpu_droplets.sizes) + + @cached_property + def snapshots(self) -> SnapshotsResourceWithRawResponse: + return SnapshotsResourceWithRawResponse(self._gpu_droplets.snapshots) + + @cached_property + def volumes(self) -> VolumesResourceWithRawResponse: + return VolumesResourceWithRawResponse(self._gpu_droplets.volumes) + + @cached_property + def account(self) -> AccountResourceWithRawResponse: + return AccountResourceWithRawResponse(self._gpu_droplets.account) + + +class AsyncGPUDropletsResourceWithRawResponse: + def __init__(self, gpu_droplets: AsyncGPUDropletsResource) -> None: + self._gpu_droplets = gpu_droplets + + self.create = async_to_raw_response_wrapper( + gpu_droplets.create, + ) + self.retrieve = async_to_raw_response_wrapper( + gpu_droplets.retrieve, + ) + self.list = async_to_raw_response_wrapper( + gpu_droplets.list, + ) + self.delete = 
async_to_raw_response_wrapper( + gpu_droplets.delete, + ) + self.delete_by_tag = async_to_raw_response_wrapper( + gpu_droplets.delete_by_tag, + ) + self.list_firewalls = async_to_raw_response_wrapper( + gpu_droplets.list_firewalls, + ) + self.list_kernels = async_to_raw_response_wrapper( + gpu_droplets.list_kernels, + ) + self.list_neighbors = async_to_raw_response_wrapper( + gpu_droplets.list_neighbors, + ) + self.list_snapshots = async_to_raw_response_wrapper( + gpu_droplets.list_snapshots, + ) + + @cached_property + def backups(self) -> AsyncBackupsResourceWithRawResponse: + return AsyncBackupsResourceWithRawResponse(self._gpu_droplets.backups) + + @cached_property + def actions(self) -> AsyncActionsResourceWithRawResponse: + return AsyncActionsResourceWithRawResponse(self._gpu_droplets.actions) + + @cached_property + def destroy_with_associated_resources(self) -> AsyncDestroyWithAssociatedResourcesResourceWithRawResponse: + return AsyncDestroyWithAssociatedResourcesResourceWithRawResponse( + self._gpu_droplets.destroy_with_associated_resources + ) + + @cached_property + def autoscale(self) -> AsyncAutoscaleResourceWithRawResponse: + return AsyncAutoscaleResourceWithRawResponse(self._gpu_droplets.autoscale) + + @cached_property + def firewalls(self) -> AsyncFirewallsResourceWithRawResponse: + return AsyncFirewallsResourceWithRawResponse(self._gpu_droplets.firewalls) + + @cached_property + def floating_ips(self) -> AsyncFloatingIPsResourceWithRawResponse: + return AsyncFloatingIPsResourceWithRawResponse(self._gpu_droplets.floating_ips) + + @cached_property + def images(self) -> AsyncImagesResourceWithRawResponse: + return AsyncImagesResourceWithRawResponse(self._gpu_droplets.images) + + @cached_property + def load_balancers(self) -> AsyncLoadBalancersResourceWithRawResponse: + return AsyncLoadBalancersResourceWithRawResponse(self._gpu_droplets.load_balancers) + + @cached_property + def sizes(self) -> AsyncSizesResourceWithRawResponse: + return 
AsyncSizesResourceWithRawResponse(self._gpu_droplets.sizes) + + @cached_property + def snapshots(self) -> AsyncSnapshotsResourceWithRawResponse: + return AsyncSnapshotsResourceWithRawResponse(self._gpu_droplets.snapshots) + + @cached_property + def volumes(self) -> AsyncVolumesResourceWithRawResponse: + return AsyncVolumesResourceWithRawResponse(self._gpu_droplets.volumes) + + @cached_property + def account(self) -> AsyncAccountResourceWithRawResponse: + return AsyncAccountResourceWithRawResponse(self._gpu_droplets.account) + + +class GPUDropletsResourceWithStreamingResponse: + def __init__(self, gpu_droplets: GPUDropletsResource) -> None: + self._gpu_droplets = gpu_droplets + + self.create = to_streamed_response_wrapper( + gpu_droplets.create, + ) + self.retrieve = to_streamed_response_wrapper( + gpu_droplets.retrieve, + ) + self.list = to_streamed_response_wrapper( + gpu_droplets.list, + ) + self.delete = to_streamed_response_wrapper( + gpu_droplets.delete, + ) + self.delete_by_tag = to_streamed_response_wrapper( + gpu_droplets.delete_by_tag, + ) + self.list_firewalls = to_streamed_response_wrapper( + gpu_droplets.list_firewalls, + ) + self.list_kernels = to_streamed_response_wrapper( + gpu_droplets.list_kernels, + ) + self.list_neighbors = to_streamed_response_wrapper( + gpu_droplets.list_neighbors, + ) + self.list_snapshots = to_streamed_response_wrapper( + gpu_droplets.list_snapshots, + ) + + @cached_property + def backups(self) -> BackupsResourceWithStreamingResponse: + return BackupsResourceWithStreamingResponse(self._gpu_droplets.backups) + + @cached_property + def actions(self) -> ActionsResourceWithStreamingResponse: + return ActionsResourceWithStreamingResponse(self._gpu_droplets.actions) + + @cached_property + def destroy_with_associated_resources(self) -> DestroyWithAssociatedResourcesResourceWithStreamingResponse: + return DestroyWithAssociatedResourcesResourceWithStreamingResponse( + self._gpu_droplets.destroy_with_associated_resources + ) + + 
@cached_property + def autoscale(self) -> AutoscaleResourceWithStreamingResponse: + return AutoscaleResourceWithStreamingResponse(self._gpu_droplets.autoscale) + + @cached_property + def firewalls(self) -> FirewallsResourceWithStreamingResponse: + return FirewallsResourceWithStreamingResponse(self._gpu_droplets.firewalls) + + @cached_property + def floating_ips(self) -> FloatingIPsResourceWithStreamingResponse: + return FloatingIPsResourceWithStreamingResponse(self._gpu_droplets.floating_ips) + + @cached_property + def images(self) -> ImagesResourceWithStreamingResponse: + return ImagesResourceWithStreamingResponse(self._gpu_droplets.images) + + @cached_property + def load_balancers(self) -> LoadBalancersResourceWithStreamingResponse: + return LoadBalancersResourceWithStreamingResponse(self._gpu_droplets.load_balancers) + + @cached_property + def sizes(self) -> SizesResourceWithStreamingResponse: + return SizesResourceWithStreamingResponse(self._gpu_droplets.sizes) + + @cached_property + def snapshots(self) -> SnapshotsResourceWithStreamingResponse: + return SnapshotsResourceWithStreamingResponse(self._gpu_droplets.snapshots) + + @cached_property + def volumes(self) -> VolumesResourceWithStreamingResponse: + return VolumesResourceWithStreamingResponse(self._gpu_droplets.volumes) + + @cached_property + def account(self) -> AccountResourceWithStreamingResponse: + return AccountResourceWithStreamingResponse(self._gpu_droplets.account) + + +class AsyncGPUDropletsResourceWithStreamingResponse: + def __init__(self, gpu_droplets: AsyncGPUDropletsResource) -> None: + self._gpu_droplets = gpu_droplets + + self.create = async_to_streamed_response_wrapper( + gpu_droplets.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + gpu_droplets.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + gpu_droplets.list, + ) + self.delete = async_to_streamed_response_wrapper( + gpu_droplets.delete, + ) + self.delete_by_tag = async_to_streamed_response_wrapper( + 
gpu_droplets.delete_by_tag, + ) + self.list_firewalls = async_to_streamed_response_wrapper( + gpu_droplets.list_firewalls, + ) + self.list_kernels = async_to_streamed_response_wrapper( + gpu_droplets.list_kernels, + ) + self.list_neighbors = async_to_streamed_response_wrapper( + gpu_droplets.list_neighbors, + ) + self.list_snapshots = async_to_streamed_response_wrapper( + gpu_droplets.list_snapshots, + ) + + @cached_property + def backups(self) -> AsyncBackupsResourceWithStreamingResponse: + return AsyncBackupsResourceWithStreamingResponse(self._gpu_droplets.backups) + + @cached_property + def actions(self) -> AsyncActionsResourceWithStreamingResponse: + return AsyncActionsResourceWithStreamingResponse(self._gpu_droplets.actions) + + @cached_property + def destroy_with_associated_resources(self) -> AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse: + return AsyncDestroyWithAssociatedResourcesResourceWithStreamingResponse( + self._gpu_droplets.destroy_with_associated_resources + ) + + @cached_property + def autoscale(self) -> AsyncAutoscaleResourceWithStreamingResponse: + return AsyncAutoscaleResourceWithStreamingResponse(self._gpu_droplets.autoscale) + + @cached_property + def firewalls(self) -> AsyncFirewallsResourceWithStreamingResponse: + return AsyncFirewallsResourceWithStreamingResponse(self._gpu_droplets.firewalls) + + @cached_property + def floating_ips(self) -> AsyncFloatingIPsResourceWithStreamingResponse: + return AsyncFloatingIPsResourceWithStreamingResponse(self._gpu_droplets.floating_ips) + + @cached_property + def images(self) -> AsyncImagesResourceWithStreamingResponse: + return AsyncImagesResourceWithStreamingResponse(self._gpu_droplets.images) + + @cached_property + def load_balancers(self) -> AsyncLoadBalancersResourceWithStreamingResponse: + return AsyncLoadBalancersResourceWithStreamingResponse(self._gpu_droplets.load_balancers) + + @cached_property + def sizes(self) -> AsyncSizesResourceWithStreamingResponse: + return 
AsyncSizesResourceWithStreamingResponse(self._gpu_droplets.sizes) + + @cached_property + def snapshots(self) -> AsyncSnapshotsResourceWithStreamingResponse: + return AsyncSnapshotsResourceWithStreamingResponse(self._gpu_droplets.snapshots) + + @cached_property + def volumes(self) -> AsyncVolumesResourceWithStreamingResponse: + return AsyncVolumesResourceWithStreamingResponse(self._gpu_droplets.volumes) + + @cached_property + def account(self) -> AsyncAccountResourceWithStreamingResponse: + return AsyncAccountResourceWithStreamingResponse(self._gpu_droplets.account) diff --git a/src/gradientai/resources/gpu_droplets/images/__init__.py b/src/gradientai/resources/gpu_droplets/images/__init__.py new file mode 100644 index 00000000..477fd657 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/images/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .images import ( + ImagesResource, + AsyncImagesResource, + ImagesResourceWithRawResponse, + AsyncImagesResourceWithRawResponse, + ImagesResourceWithStreamingResponse, + AsyncImagesResourceWithStreamingResponse, +) +from .actions import ( + ActionsResource, + AsyncActionsResource, + ActionsResourceWithRawResponse, + AsyncActionsResourceWithRawResponse, + ActionsResourceWithStreamingResponse, + AsyncActionsResourceWithStreamingResponse, +) + +__all__ = [ + "ActionsResource", + "AsyncActionsResource", + "ActionsResourceWithRawResponse", + "AsyncActionsResourceWithRawResponse", + "ActionsResourceWithStreamingResponse", + "AsyncActionsResourceWithStreamingResponse", + "ImagesResource", + "AsyncImagesResource", + "ImagesResourceWithRawResponse", + "AsyncImagesResourceWithRawResponse", + "ImagesResourceWithStreamingResponse", + "AsyncImagesResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/gpu_droplets/images/actions.py b/src/gradientai/resources/gpu_droplets/images/actions.py new file mode 100644 index 00000000..9428418b --- 
/dev/null +++ b/src/gradientai/resources/gpu_droplets/images/actions.py @@ -0,0 +1,560 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, overload + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import required_args, maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.shared.action import Action +from ....types.gpu_droplets.images import action_create_params +from ....types.gpu_droplets.images.action_list_response import ActionListResponse + +__all__ = ["ActionsResource", "AsyncActionsResource"] + + +class ActionsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ActionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ActionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ActionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ActionsResourceWithStreamingResponse(self) + + @overload + def create( + self, + image_id: int, + *, + type: Literal["convert", "transfer"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Action: + """ + The following actions are available on an Image. + + ## Convert an Image to a Snapshot + + To convert an image, for example, a backup to a snapshot, send a POST request to + `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `convert`. + + ## Transfer an Image + + To transfer an image to another region, send a POST request to + `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `transfer` and set + `region` attribute to the slug identifier of the region you wish to transfer to. + + Args: + type: The action to be taken on the image. Can be either `convert` or `transfer`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def create( + self, + image_id: int, + *, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ], + type: Literal["convert", "transfer"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Action: + """ + The following actions are available on an Image. + + ## Convert an Image to a Snapshot + + To convert an image, for example, a backup to a snapshot, send a POST request to + `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `convert`. + + ## Transfer an Image + + To transfer an image to another region, send a POST request to + `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `transfer` and set + `region` attribute to the slug identifier of the region you wish to transfer to. + + Args: + region: The slug identifier for the region where the resource will initially be + available. + + type: The action to be taken on the image. Can be either `convert` or `transfer`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["type"], ["region", "type"]) + def create( + self, + image_id: int, + *, + type: Literal["convert", "transfer"], + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Action: + return self._post( + f"/v2/images/{image_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}/actions", + body=maybe_transform( + { + "type": type, + "region": region, + }, + action_create_params.ActionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Action, + ) + + def retrieve( + self, + action_id: int, + *, + image_id: int, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Action: + """ + To retrieve the status of an image action, send a GET request to + `/v2/images/$IMAGE_ID/actions/$IMAGE_ACTION_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/images/{image_id}/actions/{action_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}/actions/{action_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Action, + ) + + def list( + self, + image_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionListResponse: + """ + To retrieve all actions that have been executed on an image, send a GET request + to `/v2/images/$IMAGE_ID/actions`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/images/{image_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}/actions", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionListResponse, + ) + + +class AsyncActionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncActionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncActionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncActionsResourceWithStreamingResponse(self) + + @overload + async def create( + self, + image_id: int, + *, + type: Literal["convert", "transfer"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Action: + """ + The following actions are available on an Image. + + ## Convert an Image to a Snapshot + + To convert an image, for example, a backup to a snapshot, send a POST request to + `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `convert`. + + ## Transfer an Image + + To transfer an image to another region, send a POST request to + `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `transfer` and set + `region` attribute to the slug identifier of the region you wish to transfer to. + + Args: + type: The action to be taken on the image. Can be either `convert` or `transfer`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def create( + self, + image_id: int, + *, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ], + type: Literal["convert", "transfer"], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Action: + """ + The following actions are available on an Image. + + ## Convert an Image to a Snapshot + + To convert an image, for example, a backup to a snapshot, send a POST request to + `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `convert`. + + ## Transfer an Image + + To transfer an image to another region, send a POST request to + `/v2/images/$IMAGE_ID/actions`. Set the `type` attribute to `transfer` and set + `region` attribute to the slug identifier of the region you wish to transfer to. + + Args: + region: The slug identifier for the region where the resource will initially be + available. + + type: The action to be taken on the image. Can be either `convert` or `transfer`. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["type"], ["region", "type"]) + async def create( + self, + image_id: int, + *, + type: Literal["convert", "transfer"], + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Action: + return await self._post( + f"/v2/images/{image_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}/actions", + body=await async_maybe_transform( + { + "type": type, + "region": region, + }, + action_create_params.ActionCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Action, + ) + + async def retrieve( + self, + action_id: int, + *, + image_id: int, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Action: + """ + To retrieve the status of an image action, send a GET request to + `/v2/images/$IMAGE_ID/actions/$IMAGE_ACTION_ID`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/images/{image_id}/actions/{action_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}/actions/{action_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Action, + ) + + async def list( + self, + image_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionListResponse: + """ + To retrieve all actions that have been executed on an image, send a GET request + to `/v2/images/$IMAGE_ID/actions`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/images/{image_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}/actions", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ActionListResponse, + ) + + +class ActionsResourceWithRawResponse: + def __init__(self, actions: ActionsResource) -> None: + self._actions = actions + + self.create = to_raw_response_wrapper( + actions.create, + ) + self.retrieve = to_raw_response_wrapper( + actions.retrieve, + ) + self.list = to_raw_response_wrapper( + actions.list, + ) + + +class AsyncActionsResourceWithRawResponse: + def __init__(self, actions: AsyncActionsResource) -> None: + self._actions = actions + + self.create = async_to_raw_response_wrapper( + actions.create, + ) + self.retrieve = async_to_raw_response_wrapper( + actions.retrieve, + ) + self.list = async_to_raw_response_wrapper( + actions.list, + ) + + +class ActionsResourceWithStreamingResponse: + def __init__(self, actions: ActionsResource) -> None: + self._actions = actions + + self.create = to_streamed_response_wrapper( + actions.create, + ) + self.retrieve = to_streamed_response_wrapper( + actions.retrieve, + ) + self.list = to_streamed_response_wrapper( + actions.list, + ) + + +class AsyncActionsResourceWithStreamingResponse: + def __init__(self, actions: AsyncActionsResource) -> None: + self._actions = actions + + self.create = async_to_streamed_response_wrapper( + actions.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + actions.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + actions.list, + ) diff --git 
a/src/gradientai/resources/gpu_droplets/images/images.py b/src/gradientai/resources/gpu_droplets/images/images.py new file mode 100644 index 00000000..2c70e793 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/images/images.py @@ -0,0 +1,867 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Optional +from typing_extensions import Literal + +import httpx + +from .actions import ( + ActionsResource, + AsyncActionsResource, + ActionsResourceWithRawResponse, + AsyncActionsResourceWithRawResponse, + ActionsResourceWithStreamingResponse, + AsyncActionsResourceWithStreamingResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets import image_list_params, image_create_params, image_update_params +from ....types.gpu_droplets.image_list_response import ImageListResponse +from ....types.gpu_droplets.image_create_response import ImageCreateResponse +from ....types.gpu_droplets.image_update_response import ImageUpdateResponse +from ....types.gpu_droplets.image_retrieve_response import ImageRetrieveResponse + +__all__ = ["ImagesResource", "AsyncImagesResource"] + + +class ImagesResource(SyncAPIResource): + @cached_property + def actions(self) -> ActionsResource: + return ActionsResource(self._client) + + @cached_property + def with_raw_response(self) -> ImagesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ImagesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ImagesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ImagesResourceWithStreamingResponse(self) + + def create( + self, + *, + description: str | NotGiven = NOT_GIVEN, + distribution: Literal[ + "Arch Linux", + "CentOS", + "CoreOS", + "Debian", + "Fedora", + "Fedora Atomic", + "FreeBSD", + "Gentoo", + "openSUSE", + "RancherOS", + "Rocky Linux", + "Ubuntu", + "Unknown", + ] + | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + url: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImageCreateResponse: + """To create a new custom image, send a POST request to /v2/images. + + The body must + contain a url attribute pointing to a Linux virtual machine image to be imported + into DigitalOcean. The image must be in the raw, qcow2, vhdx, vdi, or vmdk + format. It may be compressed using gzip or bzip2 and must be smaller than 100 GB + after being decompressed. 
+ + Args: + description: An optional free-form text field to describe an image. + + distribution: The name of a custom image's distribution. Currently, the valid values are + `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, `Fedora`, `Fedora Atomic`, + `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, `Rocky Linux`, `Ubuntu`, and + `Unknown`. Any other value will be accepted but ignored, and `Unknown` will be + used in its place. + + name: The display name that has been given to an image. This is what is shown in the + control panel and is generally a descriptive title for the image in question. + + region: The slug identifier for the region where the resource will initially be + available. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + url: A URL from which the custom Linux virtual machine image may be retrieved. The + image it points to must be in the raw, qcow2, vhdx, vdi, or vmdk format. It may + be compressed using gzip or bzip2 and must be smaller than 100 GB after being + decompressed. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/v2/images" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/images", + body=maybe_transform( + { + "description": description, + "distribution": distribution, + "name": name, + "region": region, + "tags": tags, + "url": url, + }, + image_create_params.ImageCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImageCreateResponse, + ) + + def retrieve( + self, + image_id: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImageRetrieveResponse: + """ + To retrieve information about an image, send a `GET` request to + `/v2/images/$IDENTIFIER`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/images/{image_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImageRetrieveResponse, + ) + + def update( + self, + image_id: int, + *, + description: str | NotGiven = NOT_GIVEN, + distribution: Literal[ + "Arch Linux", + "CentOS", + "CoreOS", + "Debian", + "Fedora", + "Fedora Atomic", + "FreeBSD", + "Gentoo", + "openSUSE", + "RancherOS", + "Rocky Linux", + "Ubuntu", + "Unknown", + ] + | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImageUpdateResponse: + """To update an image, send a `PUT` request to `/v2/images/$IMAGE_ID`. + + Set the + `name` attribute to the new value you would like to use. For custom images, the + `description` and `distribution` attributes may also be updated. + + Args: + description: An optional free-form text field to describe an image. + + distribution: The name of a custom image's distribution. Currently, the valid values are + `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, `Fedora`, `Fedora Atomic`, + `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, `Rocky Linux`, `Ubuntu`, and + `Unknown`. 
Any other value will be accepted but ignored, and `Unknown` will be + used in its place. + + name: The display name that has been given to an image. This is what is shown in the + control panel and is generally a descriptive title for the image in question. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._put( + f"/v2/images/{image_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}", + body=maybe_transform( + { + "description": description, + "distribution": distribution, + "name": name, + }, + image_update_params.ImageUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImageUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + private: bool | NotGiven = NOT_GIVEN, + tag_name: str | NotGiven = NOT_GIVEN, + type: Literal["application", "distribution"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImageListResponse: + """ + To list all of the images available on your account, send a GET request to + /v2/images. + + ## Filtering Results + + --- + + It's possible to request filtered results by including certain query parameters. 
+ + **Image Type** + + Either 1-Click Application or OS Distribution images can be filtered by using + the `type` query parameter. + + > Important: The `type` query parameter does not directly relate to the `type` + > attribute. + + To retrieve only **_distribution_** images, include the `type` query parameter + set to distribution, `/v2/images?type=distribution`. + + To retrieve only **_application_** images, include the `type` query parameter + set to application, `/v2/images?type=application`. + + **User Images** + + To retrieve only the private images of a user, include the `private` query + parameter set to true, `/v2/images?private=true`. + + **Tags** + + To list all images assigned to a specific tag, include the `tag_name` query + parameter set to the name of the tag in your GET request. For example, + `/v2/images?tag_name=$TAG_NAME`. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + private: Used to filter only user images. + + tag_name: Used to filter images by a specific tag. + + type: Filters results based on image type which can be either `application` or + `distribution`. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/images" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/images", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + "private": private, + "tag_name": tag_name, + "type": type, + }, + image_list_params.ImageListParams, + ), + ), + cast_to=ImageListResponse, + ) + + def delete( + self, + image_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a snapshot or custom image, send a `DELETE` request to + `/v2/images/$IMAGE_ID`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/images/{image_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncImagesResource(AsyncAPIResource): + @cached_property + def actions(self) -> AsyncActionsResource: + return AsyncActionsResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncImagesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncImagesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncImagesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncImagesResourceWithStreamingResponse(self) + + async def create( + self, + *, + description: str | NotGiven = NOT_GIVEN, + distribution: Literal[ + "Arch Linux", + "CentOS", + "CoreOS", + "Debian", + "Fedora", + "Fedora Atomic", + "FreeBSD", + "Gentoo", + "openSUSE", + "RancherOS", + "Rocky Linux", + "Ubuntu", + "Unknown", + ] + | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + url: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImageCreateResponse: + """To create a new custom image, send a POST request to /v2/images. + + The body must + contain a url attribute pointing to a Linux virtual machine image to be imported + into DigitalOcean. The image must be in the raw, qcow2, vhdx, vdi, or vmdk + format. It may be compressed using gzip or bzip2 and must be smaller than 100 GB + after being decompressed. + + Args: + description: An optional free-form text field to describe an image. + + distribution: The name of a custom image's distribution. Currently, the valid values are + `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, `Fedora`, `Fedora Atomic`, + `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, `Rocky Linux`, `Ubuntu`, and + `Unknown`. 
Any other value will be accepted but ignored, and `Unknown` will be + used in its place. + + name: The display name that has been given to an image. This is what is shown in the + control panel and is generally a descriptive title for the image in question. + + region: The slug identifier for the region where the resource will initially be + available. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + url: A URL from which the custom Linux virtual machine image may be retrieved. The + image it points to must be in the raw, qcow2, vhdx, vdi, or vmdk format. It may + be compressed using gzip or bzip2 and must be smaller than 100 GB after being + decompressed. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/v2/images" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/images", + body=await async_maybe_transform( + { + "description": description, + "distribution": distribution, + "name": name, + "region": region, + "tags": tags, + "url": url, + }, + image_create_params.ImageCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImageCreateResponse, + ) + + async def retrieve( + self, + image_id: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImageRetrieveResponse: + """ + To retrieve information about an image, send a `GET` request to + `/v2/images/$IDENTIFIER`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/images/{image_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImageRetrieveResponse, + ) + + async def update( + self, + image_id: int, + *, + description: str | NotGiven = NOT_GIVEN, + distribution: Literal[ + "Arch Linux", + "CentOS", + "CoreOS", + "Debian", + "Fedora", + "Fedora Atomic", + "FreeBSD", + "Gentoo", + "openSUSE", + "RancherOS", + "Rocky Linux", + "Ubuntu", + "Unknown", + ] + | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImageUpdateResponse: + """To update an image, send a `PUT` request to `/v2/images/$IMAGE_ID`. + + Set the + `name` attribute to the new value you would like to use. For custom images, the + `description` and `distribution` attributes may also be updated. + + Args: + description: An optional free-form text field to describe an image. 
+ + distribution: The name of a custom image's distribution. Currently, the valid values are + `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, `Fedora`, `Fedora Atomic`, + `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, `Rocky Linux`, `Ubuntu`, and + `Unknown`. Any other value will be accepted but ignored, and `Unknown` will be + used in its place. + + name: The display name that has been given to an image. This is what is shown in the + control panel and is generally a descriptive title for the image in question. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._put( + f"/v2/images/{image_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}", + body=await async_maybe_transform( + { + "description": description, + "distribution": distribution, + "name": name, + }, + image_update_params.ImageUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImageUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + private: bool | NotGiven = NOT_GIVEN, + tag_name: str | NotGiven = NOT_GIVEN, + type: Literal["application", "distribution"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImageListResponse: + """ + To list all of the images available on your account, send a GET request to + /v2/images. + + ## Filtering Results + + --- + + It's possible to request filtered results by including certain query parameters. + + **Image Type** + + Either 1-Click Application or OS Distribution images can be filtered by using + the `type` query parameter. + + > Important: The `type` query parameter does not directly relate to the `type` + > attribute. + + To retrieve only **_distribution_** images, include the `type` query parameter + set to distribution, `/v2/images?type=distribution`. + + To retrieve only **_application_** images, include the `type` query parameter + set to application, `/v2/images?type=application`. + + **User Images** + + To retrieve only the private images of a user, include the `private` query + parameter set to true, `/v2/images?private=true`. + + **Tags** + + To list all images assigned to a specific tag, include the `tag_name` query + parameter set to the name of the tag in your GET request. For example, + `/v2/images?tag_name=$TAG_NAME`. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + private: Used to filter only user images. + + tag_name: Used to filter images by a specific tag. + + type: Filters results based on image type which can be either `application` or + `distribution`. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/images" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/images", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + "private": private, + "tag_name": tag_name, + "type": type, + }, + image_list_params.ImageListParams, + ), + ), + cast_to=ImageListResponse, + ) + + async def delete( + self, + image_id: int, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a snapshot or custom image, send a `DELETE` request to + `/v2/images/$IMAGE_ID`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/images/{image_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/images/{image_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class ImagesResourceWithRawResponse: + def __init__(self, images: ImagesResource) -> None: + self._images = images + + self.create = to_raw_response_wrapper( + images.create, + ) + self.retrieve = to_raw_response_wrapper( + images.retrieve, + ) + self.update = to_raw_response_wrapper( + images.update, + ) + self.list = to_raw_response_wrapper( + images.list, + ) + self.delete = to_raw_response_wrapper( + images.delete, + ) + + @cached_property + def actions(self) -> ActionsResourceWithRawResponse: + return ActionsResourceWithRawResponse(self._images.actions) + + +class AsyncImagesResourceWithRawResponse: + def __init__(self, images: AsyncImagesResource) -> None: + self._images = images + + self.create = async_to_raw_response_wrapper( + images.create, + ) + self.retrieve = async_to_raw_response_wrapper( + images.retrieve, + ) + self.update = async_to_raw_response_wrapper( + images.update, + ) + self.list = async_to_raw_response_wrapper( + images.list, + ) + self.delete = async_to_raw_response_wrapper( + images.delete, + ) + + @cached_property + def actions(self) -> AsyncActionsResourceWithRawResponse: + return AsyncActionsResourceWithRawResponse(self._images.actions) + + +class ImagesResourceWithStreamingResponse: + def __init__(self, images: ImagesResource) -> None: + self._images = images + + self.create = to_streamed_response_wrapper( 
+ images.create, + ) + self.retrieve = to_streamed_response_wrapper( + images.retrieve, + ) + self.update = to_streamed_response_wrapper( + images.update, + ) + self.list = to_streamed_response_wrapper( + images.list, + ) + self.delete = to_streamed_response_wrapper( + images.delete, + ) + + @cached_property + def actions(self) -> ActionsResourceWithStreamingResponse: + return ActionsResourceWithStreamingResponse(self._images.actions) + + +class AsyncImagesResourceWithStreamingResponse: + def __init__(self, images: AsyncImagesResource) -> None: + self._images = images + + self.create = async_to_streamed_response_wrapper( + images.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + images.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + images.update, + ) + self.list = async_to_streamed_response_wrapper( + images.list, + ) + self.delete = async_to_streamed_response_wrapper( + images.delete, + ) + + @cached_property + def actions(self) -> AsyncActionsResourceWithStreamingResponse: + return AsyncActionsResourceWithStreamingResponse(self._images.actions) diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/__init__.py b/src/gradientai/resources/gpu_droplets/load_balancers/__init__.py new file mode 100644 index 00000000..2cede1c8 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/load_balancers/__init__.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from .droplets import ( + DropletsResource, + AsyncDropletsResource, + DropletsResourceWithRawResponse, + AsyncDropletsResourceWithRawResponse, + DropletsResourceWithStreamingResponse, + AsyncDropletsResourceWithStreamingResponse, +) +from .load_balancers import ( + LoadBalancersResource, + AsyncLoadBalancersResource, + LoadBalancersResourceWithRawResponse, + AsyncLoadBalancersResourceWithRawResponse, + LoadBalancersResourceWithStreamingResponse, + AsyncLoadBalancersResourceWithStreamingResponse, +) +from .forwarding_rules import ( + ForwardingRulesResource, + AsyncForwardingRulesResource, + ForwardingRulesResourceWithRawResponse, + AsyncForwardingRulesResourceWithRawResponse, + ForwardingRulesResourceWithStreamingResponse, + AsyncForwardingRulesResourceWithStreamingResponse, +) + +__all__ = [ + "DropletsResource", + "AsyncDropletsResource", + "DropletsResourceWithRawResponse", + "AsyncDropletsResourceWithRawResponse", + "DropletsResourceWithStreamingResponse", + "AsyncDropletsResourceWithStreamingResponse", + "ForwardingRulesResource", + "AsyncForwardingRulesResource", + "ForwardingRulesResourceWithRawResponse", + "AsyncForwardingRulesResourceWithRawResponse", + "ForwardingRulesResourceWithStreamingResponse", + "AsyncForwardingRulesResourceWithStreamingResponse", + "LoadBalancersResource", + "AsyncLoadBalancersResource", + "LoadBalancersResourceWithRawResponse", + "AsyncLoadBalancersResourceWithRawResponse", + "LoadBalancersResourceWithStreamingResponse", + "AsyncLoadBalancersResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/droplets.py b/src/gradientai/resources/gpu_droplets/load_balancers/droplets.py new file mode 100644 index 00000000..2553a729 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/load_balancers/droplets.py @@ -0,0 +1,302 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Iterable + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets.load_balancers import droplet_add_params, droplet_remove_params + +__all__ = ["DropletsResource", "AsyncDropletsResource"] + + +class DropletsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> DropletsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return DropletsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> DropletsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return DropletsResourceWithStreamingResponse(self) + + def add( + self, + lb_id: str, + *, + droplet_ids: Iterable[int], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To assign a Droplet to a load balancer instance, send a POST request to + `/v2/load_balancers/$LOAD_BALANCER_ID/droplets`. In the body of the request, + there should be a `droplet_ids` attribute containing a list of Droplet IDs. + Individual Droplets can not be added to a load balancer configured with a + Droplet tag. Attempting to do so will result in a "422 Unprocessable Entity" + response from the API. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._post( + f"/v2/load_balancers/{lb_id}/droplets" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/droplets", + body=maybe_transform({"droplet_ids": droplet_ids}, droplet_add_params.DropletAddParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def remove( + self, + lb_id: str, + *, + droplet_ids: Iterable[int], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove a Droplet from a load balancer instance, send a DELETE request to + `/v2/load_balancers/$LOAD_BALANCER_ID/droplets`. In the body of the request, + there should be a `droplet_ids` attribute containing a list of Droplet IDs. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/load_balancers/{lb_id}/droplets" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/droplets", + body=maybe_transform({"droplet_ids": droplet_ids}, droplet_remove_params.DropletRemoveParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncDropletsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncDropletsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncDropletsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncDropletsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncDropletsResourceWithStreamingResponse(self) + + async def add( + self, + lb_id: str, + *, + droplet_ids: Iterable[int], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To assign a Droplet to a load balancer instance, send a POST request to + `/v2/load_balancers/$LOAD_BALANCER_ID/droplets`. In the body of the request, + there should be a `droplet_ids` attribute containing a list of Droplet IDs. + Individual Droplets can not be added to a load balancer configured with a + Droplet tag. Attempting to do so will result in a "422 Unprocessable Entity" + response from the API. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._post( + f"/v2/load_balancers/{lb_id}/droplets" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/droplets", + body=await async_maybe_transform({"droplet_ids": droplet_ids}, droplet_add_params.DropletAddParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def remove( + self, + lb_id: str, + *, + droplet_ids: Iterable[int], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove a Droplet from a load balancer instance, send a DELETE request to + `/v2/load_balancers/$LOAD_BALANCER_ID/droplets`. In the body of the request, + there should be a `droplet_ids` attribute containing a list of Droplet IDs. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/load_balancers/{lb_id}/droplets" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/droplets", + body=await async_maybe_transform({"droplet_ids": droplet_ids}, droplet_remove_params.DropletRemoveParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class DropletsResourceWithRawResponse: + def __init__(self, droplets: DropletsResource) -> None: + self._droplets = droplets + + self.add = to_raw_response_wrapper( + droplets.add, + ) + self.remove = to_raw_response_wrapper( + droplets.remove, + ) + + +class AsyncDropletsResourceWithRawResponse: + def __init__(self, droplets: AsyncDropletsResource) -> None: + self._droplets = droplets + + self.add = async_to_raw_response_wrapper( + droplets.add, + ) + self.remove = async_to_raw_response_wrapper( + droplets.remove, + ) + + +class DropletsResourceWithStreamingResponse: + def __init__(self, droplets: DropletsResource) -> None: + self._droplets = droplets + + self.add = to_streamed_response_wrapper( + droplets.add, + ) + self.remove = to_streamed_response_wrapper( + droplets.remove, + ) + + +class AsyncDropletsResourceWithStreamingResponse: + def __init__(self, droplets: AsyncDropletsResource) -> None: + self._droplets = droplets + + self.add = async_to_streamed_response_wrapper( + droplets.add, + ) + self.remove = async_to_streamed_response_wrapper( + droplets.remove, + ) diff --git 
a/src/gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py b/src/gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py new file mode 100644 index 00000000..2ba20f88 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py @@ -0,0 +1,301 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets.load_balancers import forwarding_rule_add_params, forwarding_rule_remove_params +from ....types.gpu_droplets.forwarding_rule_param import ForwardingRuleParam + +__all__ = ["ForwardingRulesResource", "AsyncForwardingRulesResource"] + + +class ForwardingRulesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ForwardingRulesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ForwardingRulesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ForwardingRulesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ForwardingRulesResourceWithStreamingResponse(self) + + def add( + self, + lb_id: str, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To add an additional forwarding rule to a load balancer instance, send a POST + request to `/v2/load_balancers/$LOAD_BALANCER_ID/forwarding_rules`. In the body + of the request, there should be a `forwarding_rules` attribute containing an + array of rules to be added. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._post( + f"/v2/load_balancers/{lb_id}/forwarding_rules" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/forwarding_rules", + body=maybe_transform( + {"forwarding_rules": forwarding_rules}, forwarding_rule_add_params.ForwardingRuleAddParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def remove( + self, + lb_id: str, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove forwarding rules from a load balancer instance, send a DELETE request + to `/v2/load_balancers/$LOAD_BALANCER_ID/forwarding_rules`. In the body of the + request, there should be a `forwarding_rules` attribute containing an array of + rules to be removed. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/load_balancers/{lb_id}/forwarding_rules" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/forwarding_rules", + body=maybe_transform( + {"forwarding_rules": forwarding_rules}, forwarding_rule_remove_params.ForwardingRuleRemoveParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncForwardingRulesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncForwardingRulesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncForwardingRulesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncForwardingRulesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncForwardingRulesResourceWithStreamingResponse(self) + + async def add( + self, + lb_id: str, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To add an additional forwarding rule to a load balancer instance, send a POST + request to `/v2/load_balancers/$LOAD_BALANCER_ID/forwarding_rules`. In the body + of the request, there should be a `forwarding_rules` attribute containing an + array of rules to be added. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._post( + f"/v2/load_balancers/{lb_id}/forwarding_rules" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/forwarding_rules", + body=await async_maybe_transform( + {"forwarding_rules": forwarding_rules}, forwarding_rule_add_params.ForwardingRuleAddParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def remove( + self, + lb_id: str, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To remove forwarding rules from a load balancer instance, send a DELETE request + to `/v2/load_balancers/$LOAD_BALANCER_ID/forwarding_rules`. In the body of the + request, there should be a `forwarding_rules` attribute containing an array of + rules to be removed. + + No response body will be sent back, but the response code will indicate success. + Specifically, the response code will be a 204, which means that the action was + successful with no returned body data. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/load_balancers/{lb_id}/forwarding_rules" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/forwarding_rules", + body=await async_maybe_transform( + {"forwarding_rules": forwarding_rules}, forwarding_rule_remove_params.ForwardingRuleRemoveParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class ForwardingRulesResourceWithRawResponse: + def __init__(self, forwarding_rules: ForwardingRulesResource) -> None: + self._forwarding_rules = forwarding_rules + + self.add = to_raw_response_wrapper( + forwarding_rules.add, + ) + self.remove = to_raw_response_wrapper( + forwarding_rules.remove, 
+ ) + + +class AsyncForwardingRulesResourceWithRawResponse: + def __init__(self, forwarding_rules: AsyncForwardingRulesResource) -> None: + self._forwarding_rules = forwarding_rules + + self.add = async_to_raw_response_wrapper( + forwarding_rules.add, + ) + self.remove = async_to_raw_response_wrapper( + forwarding_rules.remove, + ) + + +class ForwardingRulesResourceWithStreamingResponse: + def __init__(self, forwarding_rules: ForwardingRulesResource) -> None: + self._forwarding_rules = forwarding_rules + + self.add = to_streamed_response_wrapper( + forwarding_rules.add, + ) + self.remove = to_streamed_response_wrapper( + forwarding_rules.remove, + ) + + +class AsyncForwardingRulesResourceWithStreamingResponse: + def __init__(self, forwarding_rules: AsyncForwardingRulesResource) -> None: + self._forwarding_rules = forwarding_rules + + self.add = async_to_streamed_response_wrapper( + forwarding_rules.add, + ) + self.remove = async_to_streamed_response_wrapper( + forwarding_rules.remove, + ) diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/load_balancers.py b/src/gradientai/resources/gpu_droplets/load_balancers/load_balancers.py new file mode 100644 index 00000000..c724b6d9 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/load_balancers/load_balancers.py @@ -0,0 +1,2205 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Iterable +from typing_extensions import Literal, overload + +import httpx + +from .droplets import ( + DropletsResource, + AsyncDropletsResource, + DropletsResourceWithRawResponse, + AsyncDropletsResourceWithRawResponse, + DropletsResourceWithStreamingResponse, + AsyncDropletsResourceWithStreamingResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import required_args, maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from .forwarding_rules import ( + ForwardingRulesResource, + AsyncForwardingRulesResource, + ForwardingRulesResourceWithRawResponse, + AsyncForwardingRulesResourceWithRawResponse, + ForwardingRulesResourceWithStreamingResponse, + AsyncForwardingRulesResourceWithStreamingResponse, +) +from ....types.gpu_droplets import ( + load_balancer_list_params, + load_balancer_create_params, + load_balancer_update_params, +) +from ....types.gpu_droplets.domains_param import DomainsParam +from ....types.gpu_droplets.lb_firewall_param import LbFirewallParam +from ....types.gpu_droplets.glb_settings_param import GlbSettingsParam +from ....types.gpu_droplets.health_check_param import HealthCheckParam +from ....types.gpu_droplets.forwarding_rule_param import ForwardingRuleParam +from ....types.gpu_droplets.sticky_sessions_param import StickySessionsParam +from ....types.gpu_droplets.load_balancer_list_response import LoadBalancerListResponse +from ....types.gpu_droplets.load_balancer_create_response import LoadBalancerCreateResponse +from ....types.gpu_droplets.load_balancer_update_response import LoadBalancerUpdateResponse +from 
....types.gpu_droplets.load_balancer_retrieve_response import LoadBalancerRetrieveResponse + +__all__ = ["LoadBalancersResource", "AsyncLoadBalancersResource"] + + +class LoadBalancersResource(SyncAPIResource): + @cached_property + def droplets(self) -> DropletsResource: + return DropletsResource(self._client) + + @cached_property + def forwarding_rules(self) -> ForwardingRulesResource: + return ForwardingRulesResource(self._client) + + @cached_property + def with_raw_response(self) -> LoadBalancersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return LoadBalancersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> LoadBalancersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return LoadBalancersResourceWithStreamingResponse(self) + + @overload + def create( + self, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerCreateResponse: + """ + To create a new load balancer instance, send a POST request to + `/v2/load_balancers`. + + You can specify the Droplets that will sit behind the load balancer using one of + two methods: + + - Set `droplet_ids` to a list of specific Droplet IDs. + - Set `tag` to the name of a tag. All Droplets with this tag applied will be + assigned to the load balancer. Additional Droplets will be automatically + assigned as they are tagged. + + These methods are mutually exclusive. + + Args: + forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. + + algorithm: This field has been deprecated. You can no longer specify an algorithm for load + balancers. + + disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + + domains: An array of objects specifying the domain configurations for a Global load + balancer. + + droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. + + enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + + enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. + + firewall: An object specifying allow and deny rules to control traffic to the load + balancer. + + glb_settings: An object specifying forwarding configurations for a Global load balancer. + + health_check: An object specifying health check settings for the load balancer. + + http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the + target droplets. + + name: A human-readable name for a load balancer instance. 
+ + network: A string indicating whether the load balancer should be external or internal. + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + + network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + + project_id: The ID of the project that the load balancer is associated with. If no ID is + provided at creation, the load balancer associates with the user's default + project. If an invalid project ID is provided, the load balancer will not be + created. + + redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + + region: The slug identifier for the region where the resource will initially be + available. + + size: This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + + size_unit: How many nodes the load balancer contains. Each additional node increases the + load balancer's ability to manage more connections. Load balancers can be scaled + up or down, and you can change the number of nodes after creation up to once per + hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. + Use the `size` field to scale load balancers that reside in these regions. + + sticky_sessions: An object specifying sticky sessions settings for the load balancer. 
+ + target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + + tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + + type: A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + + vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def create( + self, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | 
NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + tag: str | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerCreateResponse: + """ + To create a new load balancer instance, send a POST request to + `/v2/load_balancers`. + + You can specify the Droplets that will sit behind the load balancer using one of + two methods: + + - Set `droplet_ids` to a list of specific Droplet IDs. + - Set `tag` to the name of a tag. All Droplets with this tag applied will be + assigned to the load balancer. Additional Droplets will be automatically + assigned as they are tagged. + + These methods are mutually exclusive. + + Args: + forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. + + algorithm: This field has been deprecated. You can no longer specify an algorithm for load + balancers. + + disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + + domains: An array of objects specifying the domain configurations for a Global load + balancer. 
+ + enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + + enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. + + firewall: An object specifying allow and deny rules to control traffic to the load + balancer. + + glb_settings: An object specifying forwarding configurations for a Global load balancer. + + health_check: An object specifying health check settings for the load balancer. + + http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the + target droplets. + + name: A human-readable name for a load balancer instance. + + network: A string indicating whether the load balancer should be external or internal. + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + + network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + + project_id: The ID of the project that the load balancer is associated with. If no ID is + provided at creation, the load balancer associates with the user's default + project. If an invalid project ID is provided, the load balancer will not be + created. + + redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + + region: The slug identifier for the region where the resource will initially be + available. + + size: This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. 
+ + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + + size_unit: How many nodes the load balancer contains. Each additional node increases the + load balancer's ability to manage more connections. Load balancers can be scaled + up or down, and you can change the number of nodes after creation up to once per + hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. + Use the `size` field to scale load balancers that reside in these regions. + + sticky_sessions: An object specifying sticky sessions settings for the load balancer. + + tag: The name of a Droplet tag corresponding to Droplets assigned to the load + balancer. + + target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + + tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + + type: A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + + vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @required_args(["forwarding_rules"]) + def create( + self, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + tag: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerCreateResponse: + return self._post( + "/v2/load_balancers" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/load_balancers", + body=maybe_transform( + { + "forwarding_rules": forwarding_rules, + "algorithm": algorithm, + "disable_lets_encrypt_dns_records": disable_lets_encrypt_dns_records, + "domains": domains, + "droplet_ids": droplet_ids, + "enable_backend_keepalive": enable_backend_keepalive, + "enable_proxy_protocol": enable_proxy_protocol, + "firewall": firewall, + "glb_settings": glb_settings, + "health_check": health_check, + "http_idle_timeout_seconds": http_idle_timeout_seconds, + "name": name, + "network": network, + "network_stack": network_stack, + "project_id": project_id, + "redirect_http_to_https": redirect_http_to_https, + "region": region, + "size": size, + "size_unit": size_unit, + "sticky_sessions": sticky_sessions, + "target_load_balancer_ids": target_load_balancer_ids, + "tls_cipher_policy": tls_cipher_policy, + "type": type, + "vpc_uuid": vpc_uuid, + "tag": tag, + }, + load_balancer_create_params.LoadBalancerCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=LoadBalancerCreateResponse, + ) + + def retrieve( + self, + lb_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerRetrieveResponse: + """ + To show information about a load balancer instance, send a GET request to + `/v2/load_balancers/$LOAD_BALANCER_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + return self._get( + f"/v2/load_balancers/{lb_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=LoadBalancerRetrieveResponse, + ) + + @overload + def update( + self, + lb_id: str, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + 
region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerUpdateResponse: + """ + To update a load balancer's settings, send a PUT request to + `/v2/load_balancers/$LOAD_BALANCER_ID`. The request should contain a full + representation of the load balancer including existing attributes. It may + contain _one of_ the `droplets_ids` or `tag` attributes as they are mutually + exclusive. **Note that any attribute that is not provided will be reset to its + default value.** + + Args: + forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. + + algorithm: This field has been deprecated. You can no longer specify an algorithm for load + balancers. + + disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + + domains: An array of objects specifying the domain configurations for a Global load + balancer. 
+ + droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. + + enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + + enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. + + firewall: An object specifying allow and deny rules to control traffic to the load + balancer. + + glb_settings: An object specifying forwarding configurations for a Global load balancer. + + health_check: An object specifying health check settings for the load balancer. + + http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the + target droplets. + + name: A human-readable name for a load balancer instance. + + network: A string indicating whether the load balancer should be external or internal. + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + + network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + + project_id: The ID of the project that the load balancer is associated with. If no ID is + provided at creation, the load balancer associates with the user's default + project. If an invalid project ID is provided, the load balancer will not be + created. + + redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + + region: The slug identifier for the region where the resource will initially be + available. + + size: This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. 
+ + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + + size_unit: How many nodes the load balancer contains. Each additional node increases the + load balancer's ability to manage more connections. Load balancers can be scaled + up or down, and you can change the number of nodes after creation up to once per + hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. + Use the `size` field to scale load balancers that reside in these regions. + + sticky_sessions: An object specifying sticky sessions settings for the load balancer. + + target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + + tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + + type: A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + + vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + def update( + self, + lb_id: str, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + tag: str | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerUpdateResponse: + """ + To update a load balancer's settings, send a PUT request to + `/v2/load_balancers/$LOAD_BALANCER_ID`. The request should contain a full + representation of the load balancer including existing attributes. It may + contain _one of_ the `droplets_ids` or `tag` attributes as they are mutually + exclusive. **Note that any attribute that is not provided will be reset to its + default value.** + + Args: + forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. + + algorithm: This field has been deprecated. You can no longer specify an algorithm for load + balancers. + + disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + + domains: An array of objects specifying the domain configurations for a Global load + balancer. + + enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + + enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. + + firewall: An object specifying allow and deny rules to control traffic to the load + balancer. + + glb_settings: An object specifying forwarding configurations for a Global load balancer. + + health_check: An object specifying health check settings for the load balancer. + + http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the + target droplets. + + name: A human-readable name for a load balancer instance. + + network: A string indicating whether the load balancer should be external or internal. + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. 
This property cannot be updated after creating the load + balancer. + + network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + + project_id: The ID of the project that the load balancer is associated with. If no ID is + provided at creation, the load balancer associates with the user's default + project. If an invalid project ID is provided, the load balancer will not be + created. + + redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + + region: The slug identifier for the region where the resource will initially be + available. + + size: This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + + size_unit: How many nodes the load balancer contains. Each additional node increases the + load balancer's ability to manage more connections. Load balancers can be scaled + up or down, and you can change the number of nodes after creation up to once per + hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. + Use the `size` field to scale load balancers that reside in these regions. + + sticky_sessions: An object specifying sticky sessions settings for the load balancer. + + tag: The name of a Droplet tag corresponding to Droplets assigned to the load + balancer. + + target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. 
+ + tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + + type: A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + + vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["forwarding_rules"]) + def update( + self, + lb_id: str, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", 
"lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + tag: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerUpdateResponse: + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + return self._put( + f"/v2/load_balancers/{lb_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}", + body=maybe_transform( + { + "forwarding_rules": forwarding_rules, + "algorithm": algorithm, + "disable_lets_encrypt_dns_records": disable_lets_encrypt_dns_records, + "domains": domains, + "droplet_ids": droplet_ids, + "enable_backend_keepalive": enable_backend_keepalive, + "enable_proxy_protocol": enable_proxy_protocol, + "firewall": firewall, + "glb_settings": glb_settings, + "health_check": health_check, + "http_idle_timeout_seconds": http_idle_timeout_seconds, + "name": name, + "network": network, + "network_stack": network_stack, + "project_id": project_id, + "redirect_http_to_https": redirect_http_to_https, + "region": region, + "size": size, + "size_unit": size_unit, + "sticky_sessions": sticky_sessions, + "target_load_balancer_ids": target_load_balancer_ids, + "tls_cipher_policy": tls_cipher_policy, + "type": type, + "vpc_uuid": vpc_uuid, + "tag": 
tag, + }, + load_balancer_update_params.LoadBalancerUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=LoadBalancerUpdateResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerListResponse: + """ + To list all of the load balancer instances on your account, send a GET request + to `/v2/load_balancers`. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/load_balancers" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/load_balancers", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + load_balancer_list_params.LoadBalancerListParams, + ), + ), + cast_to=LoadBalancerListResponse, + ) + + def delete( + self, + lb_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a load balancer instance, disassociating any Droplets assigned to it + and removing it from your account, send a DELETE request to + `/v2/load_balancers/$LOAD_BALANCER_ID`. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/load_balancers/{lb_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def delete_cache( + self, + lb_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a Global load balancer CDN cache, send a DELETE request to + `/v2/load_balancers/$LOAD_BALANCER_ID/cache`. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/load_balancers/{lb_id}/cache" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/cache", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncLoadBalancersResource(AsyncAPIResource): + @cached_property + def droplets(self) -> AsyncDropletsResource: + return AsyncDropletsResource(self._client) + + @cached_property + def forwarding_rules(self) -> AsyncForwardingRulesResource: + return AsyncForwardingRulesResource(self._client) + + @cached_property + def with_raw_response(self) -> AsyncLoadBalancersResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncLoadBalancersResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncLoadBalancersResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncLoadBalancersResourceWithStreamingResponse(self) + + @overload + async def create( + self, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerCreateResponse: + """ + To create a new load balancer instance, send a POST request to + `/v2/load_balancers`. + + You can specify the Droplets that will sit behind the load balancer using one of + two methods: + + - Set `droplet_ids` to a list of specific Droplet IDs. + - Set `tag` to the name of a tag. All Droplets with this tag applied will be + assigned to the load balancer. Additional Droplets will be automatically + assigned as they are tagged. + + These methods are mutually exclusive. + + Args: + forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. + + algorithm: This field has been deprecated. You can no longer specify an algorithm for load + balancers. + + disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + + domains: An array of objects specifying the domain configurations for a Global load + balancer. + + droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. + + enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + + enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. + + firewall: An object specifying allow and deny rules to control traffic to the load + balancer. + + glb_settings: An object specifying forwarding configurations for a Global load balancer. + + health_check: An object specifying health check settings for the load balancer. 
+ + http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the + target droplets. + + name: A human-readable name for a load balancer instance. + + network: A string indicating whether the load balancer should be external or internal. + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + + network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + + project_id: The ID of the project that the load balancer is associated with. If no ID is + provided at creation, the load balancer associates with the user's default + project. If an invalid project ID is provided, the load balancer will not be + created. + + redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + + region: The slug identifier for the region where the resource will initially be + available. + + size: This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + + size_unit: How many nodes the load balancer contains. Each additional node increases the + load balancer's ability to manage more connections. Load balancers can be scaled + up or down, and you can change the number of nodes after creation up to once per + hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. 
+ Use the `size` field to scale load balancers that reside in these regions. + + sticky_sessions: An object specifying sticky sessions settings for the load balancer. + + target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + + tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + + type: A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + + vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + async def create( + self, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + tag: str | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerCreateResponse: + """ + To create a new load balancer instance, send a POST request to + `/v2/load_balancers`. + + You can specify the Droplets that will sit behind the load balancer using one of + two methods: + + - Set `droplet_ids` to a list of specific Droplet IDs. + - Set `tag` to the name of a tag. All Droplets with this tag applied will be + assigned to the load balancer. Additional Droplets will be automatically + assigned as they are tagged. + + These methods are mutually exclusive. + + Args: + forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. + + algorithm: This field has been deprecated. You can no longer specify an algorithm for load + balancers. + + disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + + domains: An array of objects specifying the domain configurations for a Global load + balancer. + + enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + + enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. + + firewall: An object specifying allow and deny rules to control traffic to the load + balancer. + + glb_settings: An object specifying forwarding configurations for a Global load balancer. + + health_check: An object specifying health check settings for the load balancer. + + http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the + target droplets. + + name: A human-readable name for a load balancer instance. + + network: A string indicating whether the load balancer should be external or internal. 
+ Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + + network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + + project_id: The ID of the project that the load balancer is associated with. If no ID is + provided at creation, the load balancer associates with the user's default + project. If an invalid project ID is provided, the load balancer will not be + created. + + redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + + region: The slug identifier for the region where the resource will initially be + available. + + size: This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + + size_unit: How many nodes the load balancer contains. Each additional node increases the + load balancer's ability to manage more connections. Load balancers can be scaled + up or down, and you can change the number of nodes after creation up to once per + hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. + Use the `size` field to scale load balancers that reside in these regions. + + sticky_sessions: An object specifying sticky sessions settings for the load balancer. + + tag: The name of a Droplet tag corresponding to Droplets assigned to the load + balancer. 
+ + target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + + tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + + type: A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + + vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["forwarding_rules"]) + async def create( + self, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + 
"nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + tag: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerCreateResponse: + return await self._post( + "/v2/load_balancers" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/load_balancers", + body=await async_maybe_transform( + { + "forwarding_rules": forwarding_rules, + "algorithm": algorithm, + "disable_lets_encrypt_dns_records": disable_lets_encrypt_dns_records, + "domains": domains, + "droplet_ids": droplet_ids, + "enable_backend_keepalive": enable_backend_keepalive, + "enable_proxy_protocol": enable_proxy_protocol, + "firewall": firewall, + "glb_settings": glb_settings, + "health_check": health_check, + "http_idle_timeout_seconds": http_idle_timeout_seconds, + "name": name, + "network": network, + "network_stack": network_stack, + "project_id": project_id, + "redirect_http_to_https": redirect_http_to_https, + "region": region, + "size": size, + "size_unit": size_unit, + "sticky_sessions": sticky_sessions, + "target_load_balancer_ids": target_load_balancer_ids, + "tls_cipher_policy": tls_cipher_policy, + 
"type": type, + "vpc_uuid": vpc_uuid, + "tag": tag, + }, + load_balancer_create_params.LoadBalancerCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=LoadBalancerCreateResponse, + ) + + async def retrieve( + self, + lb_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerRetrieveResponse: + """ + To show information about a load balancer instance, send a GET request to + `/v2/load_balancers/$LOAD_BALANCER_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + return await self._get( + f"/v2/load_balancers/{lb_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=LoadBalancerRetrieveResponse, + ) + + @overload + async def update( + self, + lb_id: str, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, + 
enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerUpdateResponse: + """ + To update a load balancer's settings, send a PUT request to + `/v2/load_balancers/$LOAD_BALANCER_ID`. The request should contain a full + representation of the load balancer including existing attributes. It may + contain _one of_ the `droplets_ids` or `tag` attributes as they are mutually + exclusive. 
**Note that any attribute that is not provided will be reset to its + default value.** + + Args: + forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. + + algorithm: This field has been deprecated. You can no longer specify an algorithm for load + balancers. + + disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + + domains: An array of objects specifying the domain configurations for a Global load + balancer. + + droplet_ids: An array containing the IDs of the Droplets assigned to the load balancer. + + enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + + enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. + + firewall: An object specifying allow and deny rules to control traffic to the load + balancer. + + glb_settings: An object specifying forwarding configurations for a Global load balancer. + + health_check: An object specifying health check settings for the load balancer. + + http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the + target droplets. + + name: A human-readable name for a load balancer instance. + + network: A string indicating whether the load balancer should be external or internal. + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + + network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + + project_id: The ID of the project that the load balancer is associated with. If no ID is + provided at creation, the load balancer associates with the user's default + project. 
If an invalid project ID is provided, the load balancer will not be + created. + + redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + + region: The slug identifier for the region where the resource will initially be + available. + + size: This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + + size_unit: How many nodes the load balancer contains. Each additional node increases the + load balancer's ability to manage more connections. Load balancers can be scaled + up or down, and you can change the number of nodes after creation up to once per + hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. + Use the `size` field to scale load balancers that reside in these regions. + + sticky_sessions: An object specifying sticky sessions settings for the load balancer. + + target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + + tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + + type: A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + + vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def update( + self, + lb_id: str, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + tag: str | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerUpdateResponse: + """ + To update a load balancer's settings, send a PUT request to + `/v2/load_balancers/$LOAD_BALANCER_ID`. The request should contain a full + representation of the load balancer including existing attributes. It may + contain _one of_ the `droplets_ids` or `tag` attributes as they are mutually + exclusive. **Note that any attribute that is not provided will be reset to its + default value.** + + Args: + forwarding_rules: An array of objects specifying the forwarding rules for a load balancer. + + algorithm: This field has been deprecated. You can no longer specify an algorithm for load + balancers. + + disable_lets_encrypt_dns_records: A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + + domains: An array of objects specifying the domain configurations for a Global load + balancer. + + enable_backend_keepalive: A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + + enable_proxy_protocol: A boolean value indicating whether PROXY Protocol is in use. + + firewall: An object specifying allow and deny rules to control traffic to the load + balancer. + + glb_settings: An object specifying forwarding configurations for a Global load balancer. + + health_check: An object specifying health check settings for the load balancer. + + http_idle_timeout_seconds: An integer value which configures the idle timeout for HTTP requests to the + target droplets. + + name: A human-readable name for a load balancer instance. + + network: A string indicating whether the load balancer should be external or internal. 
+ Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + + network_stack: A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + + project_id: The ID of the project that the load balancer is associated with. If no ID is + provided at creation, the load balancer associates with the user's default + project. If an invalid project ID is provided, the load balancer will not be + created. + + redirect_http_to_https: A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + + region: The slug identifier for the region where the resource will initially be + available. + + size: This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + + size_unit: How many nodes the load balancer contains. Each additional node increases the + load balancer's ability to manage more connections. Load balancers can be scaled + up or down, and you can change the number of nodes after creation up to once per + hour. This field is currently not available in the AMS2, NYC2, or SFO1 regions. + Use the `size` field to scale load balancers that reside in these regions. + + sticky_sessions: An object specifying sticky sessions settings for the load balancer. + + tag: The name of a Droplet tag corresponding to Droplets assigned to the load + balancer. 
+ + target_load_balancer_ids: An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + + tls_cipher_policy: A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + + type: A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + + vpc_uuid: A string specifying the UUID of the VPC to which the load balancer is assigned. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["forwarding_rules"]) + async def update( + self, + lb_id: str, + *, + forwarding_rules: Iterable[ForwardingRuleParam], + algorithm: Literal["round_robin", "least_connections"] | NotGiven = NOT_GIVEN, + disable_lets_encrypt_dns_records: bool | NotGiven = NOT_GIVEN, + domains: Iterable[DomainsParam] | NotGiven = NOT_GIVEN, + droplet_ids: Iterable[int] | NotGiven = NOT_GIVEN, + enable_backend_keepalive: bool | NotGiven = NOT_GIVEN, + enable_proxy_protocol: bool | NotGiven = NOT_GIVEN, + firewall: LbFirewallParam | NotGiven = NOT_GIVEN, + glb_settings: GlbSettingsParam | NotGiven = NOT_GIVEN, + health_check: HealthCheckParam | NotGiven = NOT_GIVEN, + http_idle_timeout_seconds: int | NotGiven = NOT_GIVEN, + name: str | NotGiven = NOT_GIVEN, + network: Literal["EXTERNAL", "INTERNAL"] | NotGiven = NOT_GIVEN, + network_stack: Literal["IPV4", "DUALSTACK"] | NotGiven = NOT_GIVEN, + project_id: str | NotGiven = NOT_GIVEN, + redirect_http_to_https: bool | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + 
"nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + size: Literal["lb-small", "lb-medium", "lb-large"] | NotGiven = NOT_GIVEN, + size_unit: int | NotGiven = NOT_GIVEN, + sticky_sessions: StickySessionsParam | NotGiven = NOT_GIVEN, + target_load_balancer_ids: List[str] | NotGiven = NOT_GIVEN, + tls_cipher_policy: Literal["DEFAULT", "STRONG"] | NotGiven = NOT_GIVEN, + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] | NotGiven = NOT_GIVEN, + vpc_uuid: str | NotGiven = NOT_GIVEN, + tag: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerUpdateResponse: + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + return await self._put( + f"/v2/load_balancers/{lb_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}", + body=await async_maybe_transform( + { + "forwarding_rules": forwarding_rules, + "algorithm": algorithm, + "disable_lets_encrypt_dns_records": disable_lets_encrypt_dns_records, + "domains": domains, + "droplet_ids": droplet_ids, + "enable_backend_keepalive": enable_backend_keepalive, + "enable_proxy_protocol": enable_proxy_protocol, + "firewall": firewall, + "glb_settings": glb_settings, + "health_check": health_check, + "http_idle_timeout_seconds": http_idle_timeout_seconds, + "name": name, + "network": network, + "network_stack": network_stack, + "project_id": project_id, + "redirect_http_to_https": redirect_http_to_https, + "region": region, + "size": size, + "size_unit": size_unit, + 
"sticky_sessions": sticky_sessions, + "target_load_balancer_ids": target_load_balancer_ids, + "tls_cipher_policy": tls_cipher_policy, + "type": type, + "vpc_uuid": vpc_uuid, + "tag": tag, + }, + load_balancer_update_params.LoadBalancerUpdateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=LoadBalancerUpdateResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> LoadBalancerListResponse: + """ + To list all of the load balancer instances on your account, send a GET request + to `/v2/load_balancers`. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/load_balancers" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/load_balancers", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + load_balancer_list_params.LoadBalancerListParams, + ), + ), + cast_to=LoadBalancerListResponse, + ) + + async def delete( + self, + lb_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a load balancer instance, disassociating any Droplets assigned to it + and removing it from your account, send a DELETE request to + `/v2/load_balancers/$LOAD_BALANCER_ID`. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/load_balancers/{lb_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def delete_cache( + self, + lb_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a Global load balancer CDN cache, send a DELETE request to + `/v2/load_balancers/$LOAD_BALANCER_ID/cache`. + + A successful request will receive a 204 status code with no body in response. + This indicates that the request was processed successfully. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not lb_id: + raise ValueError(f"Expected a non-empty value for `lb_id` but received {lb_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/load_balancers/{lb_id}/cache" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/load_balancers/{lb_id}/cache", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class LoadBalancersResourceWithRawResponse: + def __init__(self, load_balancers: LoadBalancersResource) -> None: + self._load_balancers = load_balancers + + self.create = to_raw_response_wrapper( + load_balancers.create, + ) + self.retrieve = to_raw_response_wrapper( + load_balancers.retrieve, + ) + self.update = to_raw_response_wrapper( + load_balancers.update, + ) + self.list = to_raw_response_wrapper( + load_balancers.list, + ) + self.delete = to_raw_response_wrapper( + load_balancers.delete, + ) + self.delete_cache = to_raw_response_wrapper( + load_balancers.delete_cache, + ) + + @cached_property + def droplets(self) -> DropletsResourceWithRawResponse: + return DropletsResourceWithRawResponse(self._load_balancers.droplets) + + @cached_property + def forwarding_rules(self) -> ForwardingRulesResourceWithRawResponse: + return ForwardingRulesResourceWithRawResponse(self._load_balancers.forwarding_rules) + + +class AsyncLoadBalancersResourceWithRawResponse: + def __init__(self, load_balancers: AsyncLoadBalancersResource) -> None: + self._load_balancers = load_balancers + + self.create = async_to_raw_response_wrapper( + load_balancers.create, + ) + self.retrieve = async_to_raw_response_wrapper( + 
load_balancers.retrieve, + ) + self.update = async_to_raw_response_wrapper( + load_balancers.update, + ) + self.list = async_to_raw_response_wrapper( + load_balancers.list, + ) + self.delete = async_to_raw_response_wrapper( + load_balancers.delete, + ) + self.delete_cache = async_to_raw_response_wrapper( + load_balancers.delete_cache, + ) + + @cached_property + def droplets(self) -> AsyncDropletsResourceWithRawResponse: + return AsyncDropletsResourceWithRawResponse(self._load_balancers.droplets) + + @cached_property + def forwarding_rules(self) -> AsyncForwardingRulesResourceWithRawResponse: + return AsyncForwardingRulesResourceWithRawResponse(self._load_balancers.forwarding_rules) + + +class LoadBalancersResourceWithStreamingResponse: + def __init__(self, load_balancers: LoadBalancersResource) -> None: + self._load_balancers = load_balancers + + self.create = to_streamed_response_wrapper( + load_balancers.create, + ) + self.retrieve = to_streamed_response_wrapper( + load_balancers.retrieve, + ) + self.update = to_streamed_response_wrapper( + load_balancers.update, + ) + self.list = to_streamed_response_wrapper( + load_balancers.list, + ) + self.delete = to_streamed_response_wrapper( + load_balancers.delete, + ) + self.delete_cache = to_streamed_response_wrapper( + load_balancers.delete_cache, + ) + + @cached_property + def droplets(self) -> DropletsResourceWithStreamingResponse: + return DropletsResourceWithStreamingResponse(self._load_balancers.droplets) + + @cached_property + def forwarding_rules(self) -> ForwardingRulesResourceWithStreamingResponse: + return ForwardingRulesResourceWithStreamingResponse(self._load_balancers.forwarding_rules) + + +class AsyncLoadBalancersResourceWithStreamingResponse: + def __init__(self, load_balancers: AsyncLoadBalancersResource) -> None: + self._load_balancers = load_balancers + + self.create = async_to_streamed_response_wrapper( + load_balancers.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + 
load_balancers.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + load_balancers.update, + ) + self.list = async_to_streamed_response_wrapper( + load_balancers.list, + ) + self.delete = async_to_streamed_response_wrapper( + load_balancers.delete, + ) + self.delete_cache = async_to_streamed_response_wrapper( + load_balancers.delete_cache, + ) + + @cached_property + def droplets(self) -> AsyncDropletsResourceWithStreamingResponse: + return AsyncDropletsResourceWithStreamingResponse(self._load_balancers.droplets) + + @cached_property + def forwarding_rules(self) -> AsyncForwardingRulesResourceWithStreamingResponse: + return AsyncForwardingRulesResourceWithStreamingResponse(self._load_balancers.forwarding_rules) diff --git a/src/gradientai/resources/gpu_droplets/sizes.py b/src/gradientai/resources/gpu_droplets/sizes.py new file mode 100644 index 00000000..e37116c7 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/sizes.py @@ -0,0 +1,199 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.gpu_droplets import size_list_params +from ...types.gpu_droplets.size_list_response import SizeListResponse + +__all__ = ["SizesResource", "AsyncSizesResource"] + + +class SizesResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> SizesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return SizesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> SizesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return SizesResourceWithStreamingResponse(self) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SizeListResponse: + """To list all of available Droplet sizes, send a GET request to `/v2/sizes`. + + The + response will be a JSON object with a key called `sizes`. The value of this will + be an array of `size` objects each of which contain the standard size + attributes. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/sizes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/sizes", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + size_list_params.SizeListParams, + ), + ), + cast_to=SizeListResponse, + ) + + +class AsyncSizesResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncSizesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncSizesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncSizesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncSizesResourceWithStreamingResponse(self) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SizeListResponse: + """To list all of available Droplet sizes, send a GET request to `/v2/sizes`. + + The + response will be a JSON object with a key called `sizes`. The value of this will + be an array of `size` objects each of which contain the standard size + attributes. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/sizes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/sizes", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + size_list_params.SizeListParams, + ), + ), + cast_to=SizeListResponse, + ) + + +class SizesResourceWithRawResponse: + def __init__(self, sizes: SizesResource) -> None: + self._sizes = sizes + + self.list = to_raw_response_wrapper( + sizes.list, + ) + + +class AsyncSizesResourceWithRawResponse: + def __init__(self, sizes: AsyncSizesResource) -> None: + self._sizes = sizes + + self.list = async_to_raw_response_wrapper( + sizes.list, + ) + + +class SizesResourceWithStreamingResponse: + def __init__(self, sizes: SizesResource) -> None: + self._sizes = sizes + + self.list = to_streamed_response_wrapper( + sizes.list, + ) + + +class AsyncSizesResourceWithStreamingResponse: + def __init__(self, sizes: AsyncSizesResource) -> None: + self._sizes = sizes + + self.list = async_to_streamed_response_wrapper( + sizes.list, + ) diff 
--git a/src/gradientai/resources/gpu_droplets/snapshots.py b/src/gradientai/resources/gpu_droplets/snapshots.py new file mode 100644 index 00000000..081ab5b8 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/snapshots.py @@ -0,0 +1,425 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal + +import httpx + +from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ..._base_client import make_request_options +from ...types.gpu_droplets import snapshot_list_params +from ...types.gpu_droplets.snapshot_list_response import SnapshotListResponse +from ...types.gpu_droplets.snapshot_retrieve_response import SnapshotRetrieveResponse + +__all__ = ["SnapshotsResource", "AsyncSnapshotsResource"] + + +class SnapshotsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> SnapshotsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return SnapshotsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> SnapshotsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return SnapshotsResourceWithStreamingResponse(self) + + def retrieve( + self, + snapshot_id: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SnapshotRetrieveResponse: + """ + To retrieve information about a snapshot, send a GET request to + `/v2/snapshots/$SNAPSHOT_ID`. + + The response will be a JSON object with a key called `snapshot`. The value of + this will be an snapshot object containing the standard snapshot attributes. + + Args: + snapshot_id: The ID of a Droplet snapshot. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + f"/v2/snapshots/{snapshot_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/snapshots/{snapshot_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=SnapshotRetrieveResponse, + ) + + def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + resource_type: Literal["droplet", "volume"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SnapshotListResponse: + """ + To list all of the snapshots available on your account, send a GET request to + `/v2/snapshots`. + + The response will be a JSON object with a key called `snapshots`. This will be + set to an array of `snapshot` objects, each of which will contain the standard + snapshot attributes. + + ### Filtering Results by Resource Type + + It's possible to request filtered results by including certain query parameters. + + #### List Droplet Snapshots + + To retrieve only snapshots based on Droplets, include the `resource_type` query + parameter set to `droplet`. For example, `/v2/snapshots?resource_type=droplet`. + + #### List Volume Snapshots + + To retrieve only snapshots based on volumes, include the `resource_type` query + parameter set to `volume`. For example, `/v2/snapshots?resource_type=volume`. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + resource_type: Used to filter snapshots by a resource type. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._get( + "/v2/snapshots" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/snapshots", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + "resource_type": resource_type, + }, + snapshot_list_params.SnapshotListParams, + ), + ), + cast_to=SnapshotListResponse, + ) + + def delete( + self, + snapshot_id: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + Both Droplet and volume snapshots are managed through the `/v2/snapshots/` + endpoint. To delete a snapshot, send a DELETE request to + `/v2/snapshots/$SNAPSHOT_ID`. + + A status of 204 will be given. This indicates that the request was processed + successfully, but that no response body is needed. + + Args: + snapshot_id: The ID of a Droplet snapshot. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/snapshots/{snapshot_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/snapshots/{snapshot_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncSnapshotsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncSnapshotsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncSnapshotsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncSnapshotsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncSnapshotsResourceWithStreamingResponse(self) + + async def retrieve( + self, + snapshot_id: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SnapshotRetrieveResponse: + """ + To retrieve information about a snapshot, send a GET request to + `/v2/snapshots/$SNAPSHOT_ID`. + + The response will be a JSON object with a key called `snapshot`. The value of + this will be an snapshot object containing the standard snapshot attributes. + + Args: + snapshot_id: The ID of a Droplet snapshot. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + f"/v2/snapshots/{snapshot_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/snapshots/{snapshot_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=SnapshotRetrieveResponse, + ) + + async def list( + self, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + resource_type: Literal["droplet", "volume"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SnapshotListResponse: + """ + To list all of the snapshots available on your account, send a GET request to + `/v2/snapshots`. + + The response will be a JSON object with a key called `snapshots`. 
This will be + set to an array of `snapshot` objects, each of which will contain the standard + snapshot attributes. + + ### Filtering Results by Resource Type + + It's possible to request filtered results by including certain query parameters. + + #### List Droplet Snapshots + + To retrieve only snapshots based on Droplets, include the `resource_type` query + parameter set to `droplet`. For example, `/v2/snapshots?resource_type=droplet`. + + #### List Volume Snapshots + + To retrieve only snapshots based on volumes, include the `resource_type` query + parameter set to `volume`. For example, `/v2/snapshots?resource_type=volume`. + + Args: + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + resource_type: Used to filter snapshots by a resource type. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._get( + "/v2/snapshots" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/snapshots", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + "resource_type": resource_type, + }, + snapshot_list_params.SnapshotListParams, + ), + ), + cast_to=SnapshotListResponse, + ) + + async def delete( + self, + snapshot_id: Union[int, str], + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + Both Droplet and volume snapshots are managed through the `/v2/snapshots/` + endpoint. To delete a snapshot, send a DELETE request to + `/v2/snapshots/$SNAPSHOT_ID`. + + A status of 204 will be given. This indicates that the request was processed + successfully, but that no response body is needed. + + Args: + snapshot_id: The ID of a Droplet snapshot. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/snapshots/{snapshot_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/snapshots/{snapshot_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class SnapshotsResourceWithRawResponse: + def __init__(self, snapshots: SnapshotsResource) -> None: + self._snapshots = snapshots + + self.retrieve = to_raw_response_wrapper( + snapshots.retrieve, + ) + self.list = to_raw_response_wrapper( + snapshots.list, + ) + self.delete = to_raw_response_wrapper( + snapshots.delete, + ) + + +class AsyncSnapshotsResourceWithRawResponse: + def __init__(self, snapshots: AsyncSnapshotsResource) -> None: + self._snapshots = snapshots + + self.retrieve = async_to_raw_response_wrapper( + snapshots.retrieve, + ) + self.list = async_to_raw_response_wrapper( + snapshots.list, + ) + self.delete = async_to_raw_response_wrapper( + snapshots.delete, + ) + + +class SnapshotsResourceWithStreamingResponse: + def __init__(self, snapshots: SnapshotsResource) -> None: + 
self._snapshots = snapshots + + self.retrieve = to_streamed_response_wrapper( + snapshots.retrieve, + ) + self.list = to_streamed_response_wrapper( + snapshots.list, + ) + self.delete = to_streamed_response_wrapper( + snapshots.delete, + ) + + +class AsyncSnapshotsResourceWithStreamingResponse: + def __init__(self, snapshots: AsyncSnapshotsResource) -> None: + self._snapshots = snapshots + + self.retrieve = async_to_streamed_response_wrapper( + snapshots.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + snapshots.list, + ) + self.delete = async_to_streamed_response_wrapper( + snapshots.delete, + ) diff --git a/src/gradientai/resources/gpu_droplets/volumes/__init__.py b/src/gradientai/resources/gpu_droplets/volumes/__init__.py new file mode 100644 index 00000000..167db0b3 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/volumes/__init__.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .actions import ( + ActionsResource, + AsyncActionsResource, + ActionsResourceWithRawResponse, + AsyncActionsResourceWithRawResponse, + ActionsResourceWithStreamingResponse, + AsyncActionsResourceWithStreamingResponse, +) +from .volumes import ( + VolumesResource, + AsyncVolumesResource, + VolumesResourceWithRawResponse, + AsyncVolumesResourceWithRawResponse, + VolumesResourceWithStreamingResponse, + AsyncVolumesResourceWithStreamingResponse, +) +from .snapshots import ( + SnapshotsResource, + AsyncSnapshotsResource, + SnapshotsResourceWithRawResponse, + AsyncSnapshotsResourceWithRawResponse, + SnapshotsResourceWithStreamingResponse, + AsyncSnapshotsResourceWithStreamingResponse, +) + +__all__ = [ + "ActionsResource", + "AsyncActionsResource", + "ActionsResourceWithRawResponse", + "AsyncActionsResourceWithRawResponse", + "ActionsResourceWithStreamingResponse", + "AsyncActionsResourceWithStreamingResponse", + "SnapshotsResource", + "AsyncSnapshotsResource", + "SnapshotsResourceWithRawResponse", + 
"AsyncSnapshotsResourceWithRawResponse", + "SnapshotsResourceWithStreamingResponse", + "AsyncSnapshotsResourceWithStreamingResponse", + "VolumesResource", + "AsyncVolumesResource", + "VolumesResourceWithRawResponse", + "AsyncVolumesResourceWithRawResponse", + "VolumesResourceWithStreamingResponse", + "AsyncVolumesResourceWithStreamingResponse", +] diff --git a/src/gradientai/resources/gpu_droplets/volumes/actions.py b/src/gradientai/resources/gpu_droplets/volumes/actions.py new file mode 100644 index 00000000..9d925397 --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/volumes/actions.py @@ -0,0 +1,1554 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional +from typing_extensions import Literal, overload + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ...._utils import required_args, maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets.volumes import ( + action_list_params, + action_retrieve_params, + action_initiate_by_id_params, + action_initiate_by_name_params, +) +from ....types.gpu_droplets.volumes.action_list_response import ActionListResponse +from ....types.gpu_droplets.volumes.action_retrieve_response import ActionRetrieveResponse +from ....types.gpu_droplets.volumes.action_initiate_by_id_response import ActionInitiateByIDResponse +from ....types.gpu_droplets.volumes.action_initiate_by_name_response import ActionInitiateByNameResponse + +__all__ = ["ActionsResource", "AsyncActionsResource"] + + +class ActionsResource(SyncAPIResource): + @cached_property + def 
with_raw_response(self) -> ActionsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return ActionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ActionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return ActionsResourceWithStreamingResponse(self) + + def retrieve( + self, + action_id: int, + *, + volume_id: str, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionRetrieveResponse: + """ + To retrieve the status of a volume action, send a GET request to + `/v2/volumes/$VOLUME_ID/actions/$ACTION_ID`. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return self._get( + f"/v2/volumes/{volume_id}/actions/{action_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions/{action_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_retrieve_params.ActionRetrieveParams, + ), + ), + cast_to=ActionRetrieveResponse, + ) + + def list( + self, + volume_id: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionListResponse: + """ + To retrieve all actions that have been executed on a volume, send a GET request + to `/v2/volumes/$VOLUME_ID/actions`. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return self._get( + f"/v2/volumes/{volume_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_list_params.ActionListParams, + ), + ), + cast_to=ActionListResponse, + ) + + @overload + def initiate_by_id( + self, + volume_id: str, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByIDResponse: + """ + To initiate an action on a block storage volume by Id, send a POST request to + `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate + attributes for the respective action. 
+ + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. + + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + ## Resize a Volume + + | Attribute | Details | + | -------------- | ------------------------------------------------------------------- | + | type | This must be `resize` | + | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | + | region | Set to the slug representing the region where the volume is located | + + Volumes may only be resized upwards. The maximum size for a volume is 16TiB. + + Args: + droplet_id: The unique identifier for the Droplet the volume will be attached or detached + from. + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. 
+ + Requires `tag:create` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def initiate_by_id( + self, + volume_id: str, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByIDResponse: + """ + To initiate an action on a block storage volume by Id, send a POST request to + `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate + attributes for the respective action. + + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. 
On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. + + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + ## Resize a Volume + + | Attribute | Details | + | -------------- | ------------------------------------------------------------------- | + | type | This must be `resize` | + | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | + | region | Set to the slug representing the region where the volume is located | + + Volumes may only be resized upwards. The maximum size for a volume is 16TiB. + + Args: + droplet_id: The unique identifier for the Droplet the volume will be attached or detached + from. + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + def initiate_by_id( + self, + volume_id: str, + *, + size_gigabytes: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByIDResponse: + """ + To initiate an action on a block storage volume by Id, send a POST request to + `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate + attributes for the respective action. + + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. 
+ + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + ## Resize a Volume + + | Attribute | Details | + | -------------- | ------------------------------------------------------------------- | + | type | This must be `resize` | + | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | + | region | Set to the slug representing the region where the volume is located | + + Volumes may only be resized upwards. The maximum size for a volume is 16TiB. + + Args: + size_gigabytes: The new size of the block storage volume in GiB (1024^3). + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @required_args(["droplet_id", "type"], ["size_gigabytes", "type"]) + def initiate_by_id( + self, + volume_id: str, + *, + droplet_id: int | NotGiven = NOT_GIVEN, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + size_gigabytes: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByIDResponse: + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return self._post( + f"/v2/volumes/{volume_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions", + body=maybe_transform( + { + "droplet_id": droplet_id, + "type": type, + "region": region, + "tags": tags, + "size_gigabytes": size_gigabytes, + }, + action_initiate_by_id_params.ActionInitiateByIDParams, + ), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_initiate_by_id_params.ActionInitiateByIDParams, + ), + ), + cast_to=ActionInitiateByIDResponse, + ) + + @overload + def initiate_by_name( + self, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int 
| NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByNameResponse: + """ + To initiate an action on a block storage volume by Name, send a POST request to + `~/v2/volumes/actions`. The body should contain the appropriate attributes for + the respective action. + + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ----------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | volume_name | The name of the block storage volume | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. 
+ + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ----------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | volume_name | The name of the block storage volume | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Args: + droplet_id: The unique identifier for the Droplet the volume will be attached or detached + from. + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def initiate_by_name( + self, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByNameResponse: + """ + To initiate an action on a block storage volume by Name, send a POST request to + `~/v2/volumes/actions`. The body should contain the appropriate attributes for + the respective action. + + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ----------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | volume_name | The name of the block storage volume | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. + + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ----------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | volume_name | The name of the block storage volume | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Args: + droplet_id: The unique identifier for the Droplet the volume will be attached or detached + from. + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["droplet_id", "type"]) + def initiate_by_name( + self, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByNameResponse: + return self._post( + "/v2/volumes/actions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/volumes/actions", + body=maybe_transform( + { + "droplet_id": droplet_id, + "type": type, + "region": region, + "tags": tags, + }, + action_initiate_by_name_params.ActionInitiateByNameParams, + ), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_initiate_by_name_params.ActionInitiateByNameParams, + ), + ), + cast_to=ActionInitiateByNameResponse, + ) + + +class AsyncActionsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncActionsResourceWithRawResponse: + """ + This property can be used as a 
prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncActionsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncActionsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncActionsResourceWithStreamingResponse(self) + + async def retrieve( + self, + action_id: int, + *, + volume_id: str, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionRetrieveResponse: + """ + To retrieve the status of a volume action, send a GET request to + `/v2/volumes/$VOLUME_ID/actions/$ACTION_ID`. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return await self._get( + f"/v2/volumes/{volume_id}/actions/{action_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions/{action_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_retrieve_params.ActionRetrieveParams, + ), + ), + cast_to=ActionRetrieveResponse, + ) + + async def list( + self, + volume_id: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionListResponse: + """ + To retrieve all actions that have been executed on a volume, send a GET request + to `/v2/volumes/$VOLUME_ID/actions`. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return await self._get( + f"/v2/volumes/{volume_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_list_params.ActionListParams, + ), + ), + cast_to=ActionListResponse, + ) + + @overload + async def initiate_by_id( + self, + volume_id: str, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByIDResponse: + """ + To initiate an action on a block storage volume by Id, send a POST request to + `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate + attributes for the respective action. 
+ + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. + + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + ## Resize a Volume + + | Attribute | Details | + | -------------- | ------------------------------------------------------------------- | + | type | This must be `resize` | + | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | + | region | Set to the slug representing the region where the volume is located | + + Volumes may only be resized upwards. The maximum size for a volume is 16TiB. + + Args: + droplet_id: The unique identifier for the Droplet the volume will be attached or detached + from. + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. 
+ + Requires `tag:create` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def initiate_by_id( + self, + volume_id: str, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByIDResponse: + """ + To initiate an action on a block storage volume by Id, send a POST request to + `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate + attributes for the respective action. + + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. 
On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. + + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + ## Resize a Volume + + | Attribute | Details | + | -------------- | ------------------------------------------------------------------- | + | type | This must be `resize` | + | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | + | region | Set to the slug representing the region where the volume is located | + + Volumes may only be resized upwards. The maximum size for a volume is 16TiB. + + Args: + droplet_id: The unique identifier for the Droplet the volume will be attached or detached + from. + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + async def initiate_by_id( + self, + volume_id: str, + *, + size_gigabytes: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByIDResponse: + """ + To initiate an action on a block storage volume by Id, send a POST request to + `~/v2/volumes/$VOLUME_ID/actions`. The body should contain the appropriate + attributes for the respective action. + + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. 
+ + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ---------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + ## Resize a Volume + + | Attribute | Details | + | -------------- | ------------------------------------------------------------------- | + | type | This must be `resize` | + | size_gigabytes | The new size of the block storage volume in GiB (1024^3) | + | region | Set to the slug representing the region where the volume is located | + + Volumes may only be resized upwards. The maximum size for a volume is 16TiB. + + Args: + size_gigabytes: The new size of the block storage volume in GiB (1024^3). + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @required_args(["droplet_id", "type"], ["size_gigabytes", "type"]) + async def initiate_by_id( + self, + volume_id: str, + *, + droplet_id: int | NotGiven = NOT_GIVEN, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + size_gigabytes: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByIDResponse: + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return await self._post( + f"/v2/volumes/{volume_id}/actions" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/actions", + body=await async_maybe_transform( + { + "droplet_id": droplet_id, + "type": type, + "region": region, + "tags": tags, + "size_gigabytes": size_gigabytes, + }, + action_initiate_by_id_params.ActionInitiateByIDParams, + ), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_initiate_by_id_params.ActionInitiateByIDParams, + ), + ), + cast_to=ActionInitiateByIDResponse, + ) + + @overload + async def initiate_by_name( + self, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: 
int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByNameResponse: + """ + To initiate an action on a block storage volume by Name, send a POST request to + `~/v2/volumes/actions`. The body should contain the appropriate attributes for + the respective action. + + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ----------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | volume_name | The name of the block storage volume | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. 
+ + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ----------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | volume_name | The name of the block storage volume | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Args: + droplet_id: The unique identifier for the Droplet the volume will be attached or detached + from. + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def initiate_by_name( + self, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByNameResponse: + """ + To initiate an action on a block storage volume by Name, send a POST request to + `~/v2/volumes/actions`. The body should contain the appropriate attributes for + the respective action. + + ## Attach a Block Storage Volume to a Droplet + + | Attribute | Details | + | ----------- | ------------------------------------------------------------------- | + | type | This must be `attach` | + | volume_name | The name of the block storage volume | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Each volume may only be attached to a single Droplet. However, up to fifteen + volumes may be attached to a Droplet at a time. Pre-formatted volumes will be + automatically mounted to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS + Droplets created on or after April 26, 2018 when attached. On older Droplets, + [additional configuration](https://docs.digitalocean.com/products/volumes/how-to/mount/) + is required. + + ## Remove a Block Storage Volume from a Droplet + + | Attribute | Details | + | ----------- | ------------------------------------------------------------------- | + | type | This must be `detach` | + | volume_name | The name of the block storage volume | + | droplet_id | Set to the Droplet's ID | + | region | Set to the slug representing the region where the volume is located | + + Args: + droplet_id: The unique identifier for the Droplet the volume will be attached or detached + from. + + type: The volume action to initiate. + + page: Which 'page' of paginated results to return. + + per_page: Number of items returned per page + + region: The slug identifier for the region where the resource will initially be + available. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["droplet_id", "type"]) + async def initiate_by_name( + self, + *, + droplet_id: int, + type: Literal["attach", "detach", "resize"], + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ActionInitiateByNameResponse: + return await self._post( + "/v2/volumes/actions" + if self._client._base_url_overridden + else "https://api.digitalocean.com/v2/volumes/actions", + body=await async_maybe_transform( + { + "droplet_id": droplet_id, + "type": type, + "region": region, + "tags": tags, + }, + action_initiate_by_name_params.ActionInitiateByNameParams, + ), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + action_initiate_by_name_params.ActionInitiateByNameParams, + ), + ), + cast_to=ActionInitiateByNameResponse, + ) + + +class ActionsResourceWithRawResponse: + def __init__(self, actions: ActionsResource) -> None: + self._actions = actions + + self.retrieve = 
to_raw_response_wrapper( + actions.retrieve, + ) + self.list = to_raw_response_wrapper( + actions.list, + ) + self.initiate_by_id = to_raw_response_wrapper( + actions.initiate_by_id, + ) + self.initiate_by_name = to_raw_response_wrapper( + actions.initiate_by_name, + ) + + +class AsyncActionsResourceWithRawResponse: + def __init__(self, actions: AsyncActionsResource) -> None: + self._actions = actions + + self.retrieve = async_to_raw_response_wrapper( + actions.retrieve, + ) + self.list = async_to_raw_response_wrapper( + actions.list, + ) + self.initiate_by_id = async_to_raw_response_wrapper( + actions.initiate_by_id, + ) + self.initiate_by_name = async_to_raw_response_wrapper( + actions.initiate_by_name, + ) + + +class ActionsResourceWithStreamingResponse: + def __init__(self, actions: ActionsResource) -> None: + self._actions = actions + + self.retrieve = to_streamed_response_wrapper( + actions.retrieve, + ) + self.list = to_streamed_response_wrapper( + actions.list, + ) + self.initiate_by_id = to_streamed_response_wrapper( + actions.initiate_by_id, + ) + self.initiate_by_name = to_streamed_response_wrapper( + actions.initiate_by_name, + ) + + +class AsyncActionsResourceWithStreamingResponse: + def __init__(self, actions: AsyncActionsResource) -> None: + self._actions = actions + + self.retrieve = async_to_streamed_response_wrapper( + actions.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + actions.list, + ) + self.initiate_by_id = async_to_streamed_response_wrapper( + actions.initiate_by_id, + ) + self.initiate_by_name = async_to_streamed_response_wrapper( + actions.initiate_by_name, + ) diff --git a/src/gradientai/resources/gpu_droplets/volumes/snapshots.py b/src/gradientai/resources/gpu_droplets/volumes/snapshots.py new file mode 100644 index 00000000..766d9a3a --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/volumes/snapshots.py @@ -0,0 +1,499 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Optional + +import httpx + +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import maybe_transform, async_maybe_transform +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets.volumes import snapshot_list_params, snapshot_create_params +from ....types.gpu_droplets.volumes.snapshot_list_response import SnapshotListResponse +from ....types.gpu_droplets.volumes.snapshot_create_response import SnapshotCreateResponse +from ....types.gpu_droplets.volumes.snapshot_retrieve_response import SnapshotRetrieveResponse + +__all__ = ["SnapshotsResource", "AsyncSnapshotsResource"] + + +class SnapshotsResource(SyncAPIResource): + @cached_property + def with_raw_response(self) -> SnapshotsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return SnapshotsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> SnapshotsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. 
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return SnapshotsResourceWithStreamingResponse(self) + + def create( + self, + volume_id: str, + *, + name: str, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SnapshotCreateResponse: + """ + To create a snapshot from a volume, send a POST request to + `/v2/volumes/$VOLUME_ID/snapshots`. + + Args: + name: A human-readable name for the volume snapshot. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return self._post( + f"/v2/volumes/{volume_id}/snapshots" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/snapshots", + body=maybe_transform( + { + "name": name, + "tags": tags, + }, + snapshot_create_params.SnapshotCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=SnapshotCreateResponse, + ) + + def retrieve( + self, + snapshot_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SnapshotRetrieveResponse: + """ + To retrieve the details of a snapshot that has been created from a volume, send + a GET request to `/v2/volumes/snapshots/$VOLUME_SNAPSHOT_ID`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not snapshot_id: + raise ValueError(f"Expected a non-empty value for `snapshot_id` but received {snapshot_id!r}") + return self._get( + f"/v2/volumes/snapshots/{snapshot_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/snapshots/{snapshot_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=SnapshotRetrieveResponse, + ) + + def list( + self, + volume_id: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SnapshotListResponse: + """ + To retrieve the snapshots that have been created from a volume, send a GET + request to `/v2/volumes/$VOLUME_ID/snapshots`. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return self._get( + f"/v2/volumes/{volume_id}/snapshots" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/snapshots", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "page": page, + "per_page": per_page, + }, + snapshot_list_params.SnapshotListParams, + ), + ), + cast_to=SnapshotListResponse, + ) + + def delete( + self, + snapshot_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a volume snapshot, send a DELETE request to + `/v2/volumes/snapshots/$VOLUME_SNAPSHOT_ID`. + + A status of 204 will be given. This indicates that the request was processed + successfully, but that no response body is needed. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not snapshot_id: + raise ValueError(f"Expected a non-empty value for `snapshot_id` but received {snapshot_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/volumes/snapshots/{snapshot_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/snapshots/{snapshot_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class AsyncSnapshotsResource(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncSnapshotsResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncSnapshotsResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncSnapshotsResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncSnapshotsResourceWithStreamingResponse(self) + + async def create( + self, + volume_id: str, + *, + name: str, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SnapshotCreateResponse: + """ + To create a snapshot from a volume, send a POST request to + `/v2/volumes/$VOLUME_ID/snapshots`. + + Args: + name: A human-readable name for the volume snapshot. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return await self._post( + f"/v2/volumes/{volume_id}/snapshots" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/snapshots", + body=await async_maybe_transform( + { + "name": name, + "tags": tags, + }, + snapshot_create_params.SnapshotCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=SnapshotCreateResponse, + ) + + async def retrieve( + self, + snapshot_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SnapshotRetrieveResponse: + """ + To retrieve the details of a snapshot that has been created from a volume, send + a GET request to `/v2/volumes/snapshots/$VOLUME_SNAPSHOT_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not snapshot_id: + raise ValueError(f"Expected a non-empty value for `snapshot_id` but received {snapshot_id!r}") + return await self._get( + f"/v2/volumes/snapshots/{snapshot_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/snapshots/{snapshot_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=SnapshotRetrieveResponse, + ) + + async def list( + self, + volume_id: str, + *, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SnapshotListResponse: + """ + To retrieve the snapshots that have been created from a volume, send a GET + request to `/v2/volumes/$VOLUME_ID/snapshots`. + + Args: + page: Which 'page' of paginated results to return. 
+ + per_page: Number of items returned per page + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return await self._get( + f"/v2/volumes/{volume_id}/snapshots" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}/snapshots", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform( + { + "page": page, + "per_page": per_page, + }, + snapshot_list_params.SnapshotListParams, + ), + ), + cast_to=SnapshotListResponse, + ) + + async def delete( + self, + snapshot_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a volume snapshot, send a DELETE request to + `/v2/volumes/snapshots/$VOLUME_SNAPSHOT_ID`. + + A status of 204 will be given. This indicates that the request was processed + successfully, but that no response body is needed. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not snapshot_id: + raise ValueError(f"Expected a non-empty value for `snapshot_id` but received {snapshot_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/volumes/snapshots/{snapshot_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/snapshots/{snapshot_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + +class SnapshotsResourceWithRawResponse: + def __init__(self, snapshots: SnapshotsResource) -> None: + self._snapshots = snapshots + + self.create = to_raw_response_wrapper( + snapshots.create, + ) + self.retrieve = to_raw_response_wrapper( + snapshots.retrieve, + ) + self.list = to_raw_response_wrapper( + snapshots.list, + ) + self.delete = to_raw_response_wrapper( + snapshots.delete, + ) + + +class AsyncSnapshotsResourceWithRawResponse: + def __init__(self, snapshots: AsyncSnapshotsResource) -> None: + self._snapshots = snapshots + + self.create = async_to_raw_response_wrapper( + snapshots.create, + ) + self.retrieve = async_to_raw_response_wrapper( + snapshots.retrieve, + ) + self.list = async_to_raw_response_wrapper( + snapshots.list, + ) + self.delete = async_to_raw_response_wrapper( + snapshots.delete, + ) + + +class SnapshotsResourceWithStreamingResponse: + def __init__(self, snapshots: SnapshotsResource) -> None: + self._snapshots = snapshots + + self.create = to_streamed_response_wrapper( + snapshots.create, + ) + self.retrieve = to_streamed_response_wrapper( + snapshots.retrieve, + ) + self.list = to_streamed_response_wrapper( + snapshots.list, + ) + self.delete = 
to_streamed_response_wrapper( + snapshots.delete, + ) + + +class AsyncSnapshotsResourceWithStreamingResponse: + def __init__(self, snapshots: AsyncSnapshotsResource) -> None: + self._snapshots = snapshots + + self.create = async_to_streamed_response_wrapper( + snapshots.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + snapshots.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + snapshots.list, + ) + self.delete = async_to_streamed_response_wrapper( + snapshots.delete, + ) diff --git a/src/gradientai/resources/gpu_droplets/volumes/volumes.py b/src/gradientai/resources/gpu_droplets/volumes/volumes.py new file mode 100644 index 00000000..efd1d4ae --- /dev/null +++ b/src/gradientai/resources/gpu_droplets/volumes/volumes.py @@ -0,0 +1,1144 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional +from typing_extensions import Literal, overload + +import httpx + +from .actions import ( + ActionsResource, + AsyncActionsResource, + ActionsResourceWithRawResponse, + AsyncActionsResourceWithRawResponse, + ActionsResourceWithStreamingResponse, + AsyncActionsResourceWithStreamingResponse, +) +from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven +from ...._utils import required_args, maybe_transform, async_maybe_transform +from .snapshots import ( + SnapshotsResource, + AsyncSnapshotsResource, + SnapshotsResourceWithRawResponse, + AsyncSnapshotsResourceWithRawResponse, + SnapshotsResourceWithStreamingResponse, + AsyncSnapshotsResourceWithStreamingResponse, +) +from ...._compat import cached_property +from ...._resource import SyncAPIResource, AsyncAPIResource +from ...._response import ( + to_raw_response_wrapper, + to_streamed_response_wrapper, + async_to_raw_response_wrapper, + async_to_streamed_response_wrapper, +) +from ...._base_client import make_request_options +from ....types.gpu_droplets import 
volume_list_params, volume_create_params, volume_delete_by_name_params +from ....types.gpu_droplets.volume_list_response import VolumeListResponse +from ....types.gpu_droplets.volume_create_response import VolumeCreateResponse +from ....types.gpu_droplets.volume_retrieve_response import VolumeRetrieveResponse + +__all__ = ["VolumesResource", "AsyncVolumesResource"] + + +class VolumesResource(SyncAPIResource): + @cached_property + def actions(self) -> ActionsResource: + return ActionsResource(self._client) + + @cached_property + def snapshots(self) -> SnapshotsResource: + return SnapshotsResource(self._client) + + @cached_property + def with_raw_response(self) -> VolumesResourceWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return VolumesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> VolumesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return VolumesResourceWithStreamingResponse(self) + + @overload + def create( + self, + *, + name: str, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ], + size_gigabytes: int, + description: str | NotGiven = NOT_GIVEN, + filesystem_label: str | NotGiven = NOT_GIVEN, + filesystem_type: str | NotGiven = NOT_GIVEN, + snapshot_id: str | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeCreateResponse: + """To create a new volume, send a POST request to `/v2/volumes`. + + Optionally, a + `filesystem_type` attribute may be provided in order to automatically format the + volume's filesystem. Pre-formatted volumes are automatically mounted when + attached to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS Droplets created + on or after April 26, 2018. Attaching pre-formatted volumes to Droplets without + support for auto-mounting is not recommended. + + Args: + name: A human-readable name for the block storage volume. Must be lowercase and be + composed only of numbers, letters and "-", up to a limit of 64 characters. The + name must begin with a letter. + + region: The slug identifier for the region where the resource will initially be + available. + + size_gigabytes: The size of the block storage volume in GiB (1024^3). This field does not apply + when creating a volume from a snapshot. + + description: An optional free-form text field to describe a block storage volume. + + filesystem_label: The label applied to the filesystem. Labels for ext4 type filesystems may + contain 16 characters while labels for xfs type filesystems are limited to 12 + characters. May only be used in conjunction with filesystem_type. + + filesystem_type: The name of the filesystem type to be used on the volume. When provided, the + volume will automatically be formatted to the specified filesystem type. + Currently, the available options are `ext4` and `xfs`. Pre-formatted volumes are + automatically mounted when attached to Ubuntu, Debian, Fedora, Fedora Atomic, + and CentOS Droplets created on or after April 26, 2018. 
Attaching pre-formatted + volumes to other Droplets is not recommended. + + snapshot_id: The unique identifier for the volume snapshot from which to create the volume. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def create( + self, + *, + name: str, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ], + size_gigabytes: int, + description: str | NotGiven = NOT_GIVEN, + filesystem_label: str | NotGiven = NOT_GIVEN, + filesystem_type: str | NotGiven = NOT_GIVEN, + snapshot_id: str | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeCreateResponse: + """To create a new volume, send a POST request to `/v2/volumes`. + + Optionally, a + `filesystem_type` attribute may be provided in order to automatically format the + volume's filesystem. Pre-formatted volumes are automatically mounted when + attached to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS Droplets created + on or after April 26, 2018. Attaching pre-formatted volumes to Droplets without + support for auto-mounting is not recommended. 
+ + Args: + name: A human-readable name for the block storage volume. Must be lowercase and be + composed only of numbers, letters and "-", up to a limit of 64 characters. The + name must begin with a letter. + + region: The slug identifier for the region where the resource will initially be + available. + + size_gigabytes: The size of the block storage volume in GiB (1024^3). This field does not apply + when creating a volume from a snapshot. + + description: An optional free-form text field to describe a block storage volume. + + filesystem_label: The label applied to the filesystem. Labels for ext4 type filesystems may + contain 16 characters while labels for xfs type filesystems are limited to 12 + characters. May only be used in conjunction with filesystem_type. + + filesystem_type: The name of the filesystem type to be used on the volume. When provided, the + volume will automatically be formatted to the specified filesystem type. + Currently, the available options are `ext4` and `xfs`. Pre-formatted volumes are + automatically mounted when attached to Ubuntu, Debian, Fedora, Fedora Atomic, + and CentOS Droplets created on or after April 26, 2018. Attaching pre-formatted + volumes to other Droplets is not recommended. + + snapshot_id: The unique identifier for the volume snapshot from which to create the volume. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @required_args(["name", "region", "size_gigabytes"]) + def create( + self, + *, + name: str, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ], + size_gigabytes: int, + description: str | NotGiven = NOT_GIVEN, + filesystem_label: str | NotGiven = NOT_GIVEN, + filesystem_type: str | NotGiven = NOT_GIVEN, + snapshot_id: str | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeCreateResponse: + return self._post( + "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes", + body=maybe_transform( + { + "name": name, + "region": region, + "size_gigabytes": size_gigabytes, + "description": description, + "filesystem_label": filesystem_label, + "filesystem_type": filesystem_type, + "snapshot_id": snapshot_id, + "tags": tags, + }, + volume_create_params.VolumeCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VolumeCreateResponse, + ) + + def retrieve( + self, + volume_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeRetrieveResponse: + """ + To show information about a block storage volume, send a GET request to + `/v2/volumes/$VOLUME_ID`. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return self._get( + f"/v2/volumes/{volume_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VolumeRetrieveResponse, + ) + + def list( + self, + *, + name: str | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeListResponse: + """ + To list all of the block storage volumes available on your account, send a GET + request to `/v2/volumes`. 
+
+        ## Filtering Results
+
+        ### By Region
+
+        The `region` may be provided as query parameter in order to restrict results to
+        volumes available in a specific region. For example: `/v2/volumes?region=nyc1`
+
+        ### By Name
+
+        It is also possible to list volumes on your account that match a specified name.
+        To do so, send a GET request with the volume's name as a query parameter to
+        `/v2/volumes?name=$VOLUME_NAME`. **Note:** You can only create one volume per
+        region with the same name.
+
+        ### By Name and Region
+
+        It is also possible to retrieve information about a block storage volume by
+        name. To do so, send a GET request with the volume's name and the region slug
+        for the region it is located in as query parameters to
+        `/v2/volumes?name=$VOLUME_NAME&region=nyc1`.
+
+        Args:
+          name: The block storage volume's name.
+
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          region: The slug identifier for the region where the resource is available.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return self._get(
+            "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "name": name,
+                        "page": page,
+                        "per_page": per_page,
+                        "region": region,
+                    },
+                    volume_list_params.VolumeListParams,
+                ),
+            ),
+            cast_to=VolumeListResponse,
+        )
+
+    def delete(
+        self,
+        volume_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a block storage volume, destroying all data and removing it from your + account, send a DELETE request to `/v2/volumes/$VOLUME_ID`. No response body + will be sent back, but the response code will indicate success. Specifically, + the response code will be a 204, which means that the action was successful with + no returned body data. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return self._delete( + f"/v2/volumes/{volume_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + def delete_by_name( + self, + *, + name: str | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        Block storage volumes may also be deleted by name by sending a DELETE request
+        with the volume's **name** and the **region slug** for the region it is located
+        in as query parameters to `/v2/volumes?name=$VOLUME_NAME&region=nyc1`. No
+        response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
+
+        Args:
+          name: The block storage volume's name.
+
+          region: The slug identifier for the region where the resource is available.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return self._delete(
+            "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "name": name,
+                        "region": region,
+                    },
+                    volume_delete_by_name_params.VolumeDeleteByNameParams,
+                ),
+            ),
+            cast_to=NoneType,
+        )
+
+
+class AsyncVolumesResource(AsyncAPIResource):
+    @cached_property
+    def actions(self) -> AsyncActionsResource:
+        return AsyncActionsResource(self._client)
+
+    @cached_property
+    def snapshots(self) -> AsyncSnapshotsResource:
+        return AsyncSnapshotsResource(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncVolumesResourceWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+ + For more information, see https://www.github.com/digitalocean/gradientai-python#accessing-raw-response-data-eg-headers + """ + return AsyncVolumesResourceWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncVolumesResourceWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/digitalocean/gradientai-python#with_streaming_response + """ + return AsyncVolumesResourceWithStreamingResponse(self) + + @overload + async def create( + self, + *, + name: str, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ], + size_gigabytes: int, + description: str | NotGiven = NOT_GIVEN, + filesystem_label: str | NotGiven = NOT_GIVEN, + filesystem_type: str | NotGiven = NOT_GIVEN, + snapshot_id: str | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeCreateResponse: + """To create a new volume, send a POST request to `/v2/volumes`. + + Optionally, a + `filesystem_type` attribute may be provided in order to automatically format the + volume's filesystem. Pre-formatted volumes are automatically mounted when + attached to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS Droplets created + on or after April 26, 2018. Attaching pre-formatted volumes to Droplets without + support for auto-mounting is not recommended. + + Args: + name: A human-readable name for the block storage volume. 
Must be lowercase and be + composed only of numbers, letters and "-", up to a limit of 64 characters. The + name must begin with a letter. + + region: The slug identifier for the region where the resource will initially be + available. + + size_gigabytes: The size of the block storage volume in GiB (1024^3). This field does not apply + when creating a volume from a snapshot. + + description: An optional free-form text field to describe a block storage volume. + + filesystem_label: The label applied to the filesystem. Labels for ext4 type filesystems may + contain 16 characters while labels for xfs type filesystems are limited to 12 + characters. May only be used in conjunction with filesystem_type. + + filesystem_type: The name of the filesystem type to be used on the volume. When provided, the + volume will automatically be formatted to the specified filesystem type. + Currently, the available options are `ext4` and `xfs`. Pre-formatted volumes are + automatically mounted when attached to Ubuntu, Debian, Fedora, Fedora Atomic, + and CentOS Droplets created on or after April 26, 2018. Attaching pre-formatted + volumes to other Droplets is not recommended. + + snapshot_id: The unique identifier for the volume snapshot from which to create the volume. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + async def create( + self, + *, + name: str, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ], + size_gigabytes: int, + description: str | NotGiven = NOT_GIVEN, + filesystem_label: str | NotGiven = NOT_GIVEN, + filesystem_type: str | NotGiven = NOT_GIVEN, + snapshot_id: str | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeCreateResponse: + """To create a new volume, send a POST request to `/v2/volumes`. + + Optionally, a + `filesystem_type` attribute may be provided in order to automatically format the + volume's filesystem. Pre-formatted volumes are automatically mounted when + attached to Ubuntu, Debian, Fedora, Fedora Atomic, and CentOS Droplets created + on or after April 26, 2018. Attaching pre-formatted volumes to Droplets without + support for auto-mounting is not recommended. + + Args: + name: A human-readable name for the block storage volume. Must be lowercase and be + composed only of numbers, letters and "-", up to a limit of 64 characters. The + name must begin with a letter. + + region: The slug identifier for the region where the resource will initially be + available. + + size_gigabytes: The size of the block storage volume in GiB (1024^3). This field does not apply + when creating a volume from a snapshot. + + description: An optional free-form text field to describe a block storage volume. + + filesystem_label: The label applied to the filesystem. 
Labels for ext4 type filesystems may + contain 16 characters while labels for xfs type filesystems are limited to 12 + characters. May only be used in conjunction with filesystem_type. + + filesystem_type: The name of the filesystem type to be used on the volume. When provided, the + volume will automatically be formatted to the specified filesystem type. + Currently, the available options are `ext4` and `xfs`. Pre-formatted volumes are + automatically mounted when attached to Ubuntu, Debian, Fedora, Fedora Atomic, + and CentOS Droplets created on or after April 26, 2018. Attaching pre-formatted + volumes to other Droplets is not recommended. + + snapshot_id: The unique identifier for the volume snapshot from which to create the volume. + + tags: A flat array of tag names as strings to be applied to the resource. Tag names + may be for either existing or new tags. + + Requires `tag:create` scope. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["name", "region", "size_gigabytes"]) + async def create( + self, + *, + name: str, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ], + size_gigabytes: int, + description: str | NotGiven = NOT_GIVEN, + filesystem_label: str | NotGiven = NOT_GIVEN, + filesystem_type: str | NotGiven = NOT_GIVEN, + snapshot_id: str | NotGiven = NOT_GIVEN, + tags: Optional[List[str]] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeCreateResponse: + return await self._post( + "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes", + body=await async_maybe_transform( + { + "name": name, + "region": region, + "size_gigabytes": size_gigabytes, + "description": description, + "filesystem_label": filesystem_label, + "filesystem_type": filesystem_type, + "snapshot_id": snapshot_id, + "tags": tags, + }, + volume_create_params.VolumeCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VolumeCreateResponse, + ) + + async def retrieve( + self, + volume_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeRetrieveResponse: + """ + To show information about a block storage volume, send a GET request to + `/v2/volumes/$VOLUME_ID`. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + return await self._get( + f"/v2/volumes/{volume_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=VolumeRetrieveResponse, + ) + + async def list( + self, + *, + name: str | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> VolumeListResponse: + """ + To list all of the block storage volumes available on your account, send a GET + request to `/v2/volumes`. + + ## Filtering Results + + ### By Region + + The `region` may be provided as query parameter in order to restrict results to + volumes available in a specific region. For example: `/v2/volumes?region=nyc1` + + ### By Name + + It is also possible to list volumes on your account that match a specified name. 
+        To do so, send a GET request with the volume's name as a query parameter to
+        `/v2/volumes?name=$VOLUME_NAME`. **Note:** You can only create one volume per
+        region with the same name.
+
+        ### By Name and Region
+
+        It is also possible to retrieve information about a block storage volume by
+        name. To do so, send a GET request with the volume's name and the region slug
+        for the region it is located in as query parameters to
+        `/v2/volumes?name=$VOLUME_NAME&region=nyc1`.
+
+        Args:
+          name: The block storage volume's name.
+
+          page: Which 'page' of paginated results to return.
+
+          per_page: Number of items returned per page
+
+          region: The slug identifier for the region where the resource is available.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        return await self._get(
+            "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "name": name,
+                        "page": page,
+                        "per_page": per_page,
+                        "region": region,
+                    },
+                    volume_list_params.VolumeListParams,
+                ),
+            ),
+            cast_to=VolumeListResponse,
+        )
+
+    async def delete(
+        self,
+        volume_id: str,
+        *,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> None: + """ + To delete a block storage volume, destroying all data and removing it from your + account, send a DELETE request to `/v2/volumes/$VOLUME_ID`. No response body + will be sent back, but the response code will indicate success. Specifically, + the response code will be a 204, which means that the action was successful with + no returned body data. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not volume_id: + raise ValueError(f"Expected a non-empty value for `volume_id` but received {volume_id!r}") + extra_headers = {"Accept": "*/*", **(extra_headers or {})} + return await self._delete( + f"/v2/volumes/{volume_id}" + if self._client._base_url_overridden + else f"https://api.digitalocean.com/v2/volumes/{volume_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=NoneType, + ) + + async def delete_by_name( + self, + *, + name: str | NotGiven = NOT_GIVEN, + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> None:
+        """
+        Block storage volumes may also be deleted by name by sending a DELETE request
+        with the volume's **name** and the **region slug** for the region it is located
+        in as query parameters to `/v2/volumes?name=$VOLUME_NAME&region=nyc1`. No
+        response body will be sent back, but the response code will indicate success.
+        Specifically, the response code will be a 204, which means that the action was
+        successful with no returned body data.
+
+        Args:
+          name: The block storage volume's name.
+
+          region: The slug identifier for the region where the resource is available.
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+        return await self._delete(
+            "/v2/volumes" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/volumes",
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "name": name,
+                        "region": region,
+                    },
+                    volume_delete_by_name_params.VolumeDeleteByNameParams,
+                ),
+            ),
+            cast_to=NoneType,
+        )
+
+
+class VolumesResourceWithRawResponse:
+    def __init__(self, volumes: VolumesResource) -> None:
+        self._volumes = volumes
+
+        self.create = to_raw_response_wrapper(
+            volumes.create,
+        )
+        self.retrieve = to_raw_response_wrapper(
+            volumes.retrieve,
+        )
+        self.list = to_raw_response_wrapper(
+            volumes.list,
+        )
+        self.delete = to_raw_response_wrapper(
+            volumes.delete,
+        )
+        self.delete_by_name = to_raw_response_wrapper(
+            volumes.delete_by_name,
+        )
+
+    @cached_property
+    def 
actions(self) -> ActionsResourceWithRawResponse: + return ActionsResourceWithRawResponse(self._volumes.actions) + + @cached_property + def snapshots(self) -> SnapshotsResourceWithRawResponse: + return SnapshotsResourceWithRawResponse(self._volumes.snapshots) + + +class AsyncVolumesResourceWithRawResponse: + def __init__(self, volumes: AsyncVolumesResource) -> None: + self._volumes = volumes + + self.create = async_to_raw_response_wrapper( + volumes.create, + ) + self.retrieve = async_to_raw_response_wrapper( + volumes.retrieve, + ) + self.list = async_to_raw_response_wrapper( + volumes.list, + ) + self.delete = async_to_raw_response_wrapper( + volumes.delete, + ) + self.delete_by_name = async_to_raw_response_wrapper( + volumes.delete_by_name, + ) + + @cached_property + def actions(self) -> AsyncActionsResourceWithRawResponse: + return AsyncActionsResourceWithRawResponse(self._volumes.actions) + + @cached_property + def snapshots(self) -> AsyncSnapshotsResourceWithRawResponse: + return AsyncSnapshotsResourceWithRawResponse(self._volumes.snapshots) + + +class VolumesResourceWithStreamingResponse: + def __init__(self, volumes: VolumesResource) -> None: + self._volumes = volumes + + self.create = to_streamed_response_wrapper( + volumes.create, + ) + self.retrieve = to_streamed_response_wrapper( + volumes.retrieve, + ) + self.list = to_streamed_response_wrapper( + volumes.list, + ) + self.delete = to_streamed_response_wrapper( + volumes.delete, + ) + self.delete_by_name = to_streamed_response_wrapper( + volumes.delete_by_name, + ) + + @cached_property + def actions(self) -> ActionsResourceWithStreamingResponse: + return ActionsResourceWithStreamingResponse(self._volumes.actions) + + @cached_property + def snapshots(self) -> SnapshotsResourceWithStreamingResponse: + return SnapshotsResourceWithStreamingResponse(self._volumes.snapshots) + + +class AsyncVolumesResourceWithStreamingResponse: + def __init__(self, volumes: AsyncVolumesResource) -> None: + self._volumes = 
volumes + + self.create = async_to_streamed_response_wrapper( + volumes.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + volumes.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + volumes.list, + ) + self.delete = async_to_streamed_response_wrapper( + volumes.delete, + ) + self.delete_by_name = async_to_streamed_response_wrapper( + volumes.delete_by_name, + ) + + @cached_property + def actions(self) -> AsyncActionsResourceWithStreamingResponse: + return AsyncActionsResourceWithStreamingResponse(self._volumes.actions) + + @cached_property + def snapshots(self) -> AsyncSnapshotsResourceWithStreamingResponse: + return AsyncSnapshotsResourceWithStreamingResponse(self._volumes.snapshots) diff --git a/src/gradientai/resources/inference/api_keys.py b/src/gradientai/resources/inference/api_keys.py index 6759d09c..238ef6f6 100644 --- a/src/gradientai/resources/inference/api_keys.py +++ b/src/gradientai/resources/inference/api_keys.py @@ -60,6 +60,8 @@ def create( To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. Args: + name: A human friendly name to identify the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -97,6 +99,10 @@ def update( `/v2/gen-ai/models/api_keys/{api_key_uuid}`. Args: + body_api_key_uuid: API key ID + + name: Name + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -140,9 +146,9 @@ def list( To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -280,6 +286,8 @@ async def create( To create a model API key, send a POST request to `/v2/gen-ai/models/api_keys`. 
Args: + name: A human friendly name to identify the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -317,6 +325,10 @@ async def update( `/v2/gen-ai/models/api_keys/{api_key_uuid}`. Args: + body_api_key_uuid: API key ID + + name: Name + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -360,9 +372,9 @@ async def list( To list all model API keys, send a GET request to `/v2/gen-ai/models/api_keys`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/gradientai/resources/knowledge_bases/data_sources.py index e05696b9..8357dfda 100644 --- a/src/gradientai/resources/knowledge_bases/data_sources.py +++ b/src/gradientai/resources/knowledge_bases/data_sources.py @@ -69,6 +69,14 @@ def create( `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. Args: + aws_data_source: AWS S3 Data Source + + body_knowledge_base_uuid: Knowledge base id + + spaces_data_source: Spaces Bucket Data Source + + web_crawler_data_source: WebCrawlerDataSource + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -118,9 +126,9 @@ def list( `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -236,6 +244,14 @@ async def create( `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. 
Args: + aws_data_source: AWS S3 Data Source + + body_knowledge_base_uuid: Knowledge base id + + spaces_data_source: Spaces Bucket Data Source + + web_crawler_data_source: WebCrawlerDataSource + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -285,9 +301,9 @@ async def list( `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/data_sources`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers diff --git a/src/gradientai/resources/knowledge_bases/indexing_jobs.py b/src/gradientai/resources/knowledge_bases/indexing_jobs.py index 39151e41..891acd0b 100644 --- a/src/gradientai/resources/knowledge_bases/indexing_jobs.py +++ b/src/gradientai/resources/knowledge_bases/indexing_jobs.py @@ -68,6 +68,11 @@ def create( `/v2/gen-ai/indexing_jobs`. Args: + data_source_uuids: List of data source ids to index, if none are provided, all data sources will be + indexed + + knowledge_base_uuid: Knowledge base id + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -146,9 +151,9 @@ def list( `/v2/gen-ai/indexing_jobs`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -294,6 +299,11 @@ async def create( `/v2/gen-ai/indexing_jobs`. Args: + data_source_uuids: List of data source ids to index, if none are provided, all data sources will be + indexed + + knowledge_base_uuid: Knowledge base id + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -372,9 +382,9 @@ async def list( `/v2/gen-ai/indexing_jobs`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. 
extra_headers: Send extra headers diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/gradientai/resources/knowledge_bases/knowledge_bases.py index 28acdd7f..c181295c 100644 --- a/src/gradientai/resources/knowledge_bases/knowledge_bases.py +++ b/src/gradientai/resources/knowledge_bases/knowledge_bases.py @@ -112,6 +112,8 @@ def create( tags: Tags to organize your knowledge base. + vpc_uuid: The VPC to deploy the knowledge base database in + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -201,12 +203,18 @@ def update( `/v2/gen-ai/knowledge_bases/{uuid}`. Args: - database_id: the id of the DigitalOcean database this knowledge base will use, optiona. + database_id: The id of the DigitalOcean database this knowledge base will use, optiona. embedding_model_uuid: Identifier for the foundation model. + name: Knowledge base name + + project_id: The id of the DigitalOcean project this knowledge base will belong to + tags: Tags to organize your knowledge base. + body_uuid: Knowledge base id + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -254,9 +262,9 @@ def list( To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -392,6 +400,8 @@ async def create( tags: Tags to organize your knowledge base. + vpc_uuid: The VPC to deploy the knowledge base database in + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -481,12 +491,18 @@ async def update( `/v2/gen-ai/knowledge_bases/{uuid}`. Args: - database_id: the id of the DigitalOcean database this knowledge base will use, optiona. + database_id: The id of the DigitalOcean database this knowledge base will use, optiona. embedding_model_uuid: Identifier for the foundation model. 
+ name: Knowledge base name + + project_id: The id of the DigitalOcean project this knowledge base will belong to + tags: Tags to organize your knowledge base. + body_uuid: Knowledge base id + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -534,9 +550,9 @@ async def list( To list all knowledge bases, send a GET request to `/v2/gen-ai/knowledge_bases`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers diff --git a/src/gradientai/resources/models/models.py b/src/gradientai/resources/models/models.py index 3c524767..41f2eabd 100644 --- a/src/gradientai/resources/models/models.py +++ b/src/gradientai/resources/models/models.py @@ -2,14 +2,9 @@ from __future__ import annotations -from typing import List -from typing_extensions import Literal - import httpx -from ...types import model_list_params from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven -from ..._utils import maybe_transform, async_maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import ( @@ -28,6 +23,7 @@ AsyncProvidersResourceWithStreamingResponse, ) from ...types.model_list_response import ModelListResponse +from ...types.model_retrieve_response import ModelRetrieveResponse __all__ = ["ModelsResource", "AsyncModelsResource"] @@ -56,52 +52,22 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse: """ return ModelsResourceWithStreamingResponse(self) - def list( + def retrieve( self, + model: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - | 
NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: + ) -> ModelRetrieveResponse: """ - To list all models, send a GET request to `/v2/gen-ai/models`. + Retrieves a model instance, providing basic information about the model such as + the owner and permissioning. Args: - page: page number. - - per_page: items per page. - - public_only: only include models that are publicly available. - - usecases: include only models defined for the listed usecases. - - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -110,24 +76,36 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ + if not model: + raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return self._get( - "/v2/gen-ai/models" + f"/models/{model}" if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/models", + else f"https://inference.do-ai.run/v1/models/{model}", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - 
query=maybe_transform( - { - "page": page, - "per_page": per_page, - "public_only": public_only, - "usecases": usecases, - }, - model_list_params.ModelListParams, - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModelRetrieveResponse, + ) + + def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelListResponse: + """ + Lists the currently available models, and provides basic information about each + one such as the owner and availability. + """ + return self._get( + "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ModelListResponse, ) @@ -157,52 +135,22 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse: """ return AsyncModelsResourceWithStreamingResponse(self) - async def list( + async def retrieve( self, + model: str, *, - page: int | NotGiven = NOT_GIVEN, - per_page: int | NotGiven = NOT_GIVEN, - public_only: bool | NotGiven = NOT_GIVEN, - usecases: List[ - Literal[ - "MODEL_USECASE_UNKNOWN", - "MODEL_USECASE_AGENT", - "MODEL_USECASE_FINETUNED", - "MODEL_USECASE_KNOWLEDGEBASE", - "MODEL_USECASE_GUARDRAIL", - "MODEL_USECASE_REASONING", - "MODEL_USECASE_SERVERLESS", - ] - ] - | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ModelListResponse: + ) -> ModelRetrieveResponse: """ - To list all models, send a GET request to `/v2/gen-ai/models`. + Retrieves a model instance, providing basic information about the model such as + the owner and permissioning. Args: - page: page number. - - per_page: items per page. - - public_only: only include models that are publicly available. - - usecases: include only models defined for the listed usecases. - - - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - - MODEL_USECASE_AGENT: The model maybe used in an agent - - MODEL_USECASE_FINETUNED: The model maybe used for fine tuning - - MODEL_USECASE_KNOWLEDGEBASE: The model maybe used for knowledge bases - (embedding models) - - MODEL_USECASE_GUARDRAIL: The model maybe used for guardrails - - MODEL_USECASE_REASONING: The model usecase for reasoning - - MODEL_USECASE_SERVERLESS: The model usecase for serverless inference - extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -211,24 +159,36 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ + if not model: + raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return await self._get( - "/v2/gen-ai/models" + f"/models/{model}" if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/models", + else f"https://inference.do-ai.run/v1/models/{model}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ModelRetrieveResponse, + ) + + async def list( + self, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ModelListResponse: + """ + Lists the currently available models, and provides basic information about each + one such as the owner and availability. + """ + return await self._get( + "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models", options=make_request_options( - extra_headers=extra_headers, - extra_query=extra_query, - extra_body=extra_body, - timeout=timeout, - query=await async_maybe_transform( - { - "page": page, - "per_page": per_page, - "public_only": public_only, - "usecases": usecases, - }, - model_list_params.ModelListParams, - ), + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ModelListResponse, ) @@ -238,6 +198,9 @@ class ModelsResourceWithRawResponse: def __init__(self, models: ModelsResource) -> None: self._models = models + self.retrieve = to_raw_response_wrapper( + models.retrieve, + ) self.list = to_raw_response_wrapper( models.list, ) @@ -251,6 +214,9 @@ class AsyncModelsResourceWithRawResponse: def __init__(self, models: AsyncModelsResource) -> None: self._models = models + self.retrieve = async_to_raw_response_wrapper( + models.retrieve, + ) self.list = async_to_raw_response_wrapper( models.list, ) @@ -264,6 +230,9 @@ class ModelsResourceWithStreamingResponse: def __init__(self, models: ModelsResource) -> None: self._models = models + self.retrieve = to_streamed_response_wrapper( + models.retrieve, + ) self.list = to_streamed_response_wrapper( models.list, ) @@ -277,6 +246,9 @@ class AsyncModelsResourceWithStreamingResponse: def __init__(self, models: AsyncModelsResource) -> None: self._models = models + self.retrieve = async_to_streamed_response_wrapper( + 
models.retrieve, + ) self.list = async_to_streamed_response_wrapper( models.list, ) diff --git a/src/gradientai/resources/models/providers/anthropic.py b/src/gradientai/resources/models/providers/anthropic.py index 26c9b977..e570be51 100644 --- a/src/gradientai/resources/models/providers/anthropic.py +++ b/src/gradientai/resources/models/providers/anthropic.py @@ -68,6 +68,10 @@ def create( `/v2/gen-ai/anthropic/keys`. Args: + api_key: Anthropic API key + + name: Name of the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -148,6 +152,12 @@ def update( `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. Args: + api_key: Anthropic API key + + body_api_key_uuid: API key ID + + name: Name of the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -193,9 +203,9 @@ def list( `/v2/gen-ai/anthropic/keys`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -278,9 +288,9 @@ def list_agents( List Agents by Anthropic Key. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -350,6 +360,10 @@ async def create( `/v2/gen-ai/anthropic/keys`. Args: + api_key: Anthropic API key + + name: Name of the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -430,6 +444,12 @@ async def update( `/v2/gen-ai/anthropic/keys/{api_key_uuid}`. Args: + api_key: Anthropic API key + + body_api_key_uuid: API key ID + + name: Name of the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -475,9 +495,9 @@ async def list( `/v2/gen-ai/anthropic/keys`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. 
extra_headers: Send extra headers @@ -560,9 +580,9 @@ async def list_agents( List Agents by Anthropic Key. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers diff --git a/src/gradientai/resources/models/providers/openai.py b/src/gradientai/resources/models/providers/openai.py index d337cd9b..ccd594b8 100644 --- a/src/gradientai/resources/models/providers/openai.py +++ b/src/gradientai/resources/models/providers/openai.py @@ -67,6 +67,10 @@ def create( To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. Args: + api_key: OpenAI API key + + name: Name of the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -147,6 +151,12 @@ def update( `/v2/gen-ai/openai/keys/{api_key_uuid}`. Args: + api_key: OpenAI API key + + body_api_key_uuid: API key ID + + name: Name of the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -191,9 +201,9 @@ def list( To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -276,9 +286,9 @@ def retrieve_agents( List Agents by OpenAI Key. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -347,6 +357,10 @@ async def create( To create an OpenAI API key, send a POST request to `/v2/gen-ai/openai/keys`. Args: + api_key: OpenAI API key + + name: Name of the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -427,6 +441,12 @@ async def update( `/v2/gen-ai/openai/keys/{api_key_uuid}`. 
Args: + api_key: OpenAI API key + + body_api_key_uuid: API key ID + + name: Name of the key + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -471,9 +491,9 @@ async def list( To list all OpenAI API keys, send a GET request to `/v2/gen-ai/openai/keys`. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers @@ -556,9 +576,9 @@ async def retrieve_agents( List Agents by OpenAI Key. Args: - page: page number. + page: Page number. - per_page: items per page. + per_page: Items per page. extra_headers: Send extra headers diff --git a/src/gradientai/resources/regions.py b/src/gradientai/resources/regions.py index 4c50d9e6..e953e4f3 100644 --- a/src/gradientai/resources/regions.py +++ b/src/gradientai/resources/regions.py @@ -44,8 +44,8 @@ def with_streaming_response(self) -> RegionsResourceWithStreamingResponse: def list( self, *, - serves_batch: bool | NotGiven = NOT_GIVEN, - serves_inference: bool | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -54,12 +54,15 @@ def list( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> RegionListResponse: """ - To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. + To list all of the regions that are available, send a GET request to + `/v2/regions`. The response will be a JSON object with a key called `regions`. + The value of this will be an array of `region` objects, each of which will + contain the standard region attributes. Args: - serves_batch: include datacenters that are capable of running batch jobs. 
+ page: Which 'page' of paginated results to return. - serves_inference: include datacenters that serve inference. + per_page: Number of items returned per page extra_headers: Send extra headers @@ -70,9 +73,7 @@ def list( timeout: Override the client-level default timeout for this request, in seconds """ return self._get( - "/v2/gen-ai/regions" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/regions", + "/v2/regions" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/regions", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -80,8 +81,8 @@ def list( timeout=timeout, query=maybe_transform( { - "serves_batch": serves_batch, - "serves_inference": serves_inference, + "page": page, + "per_page": per_page, }, region_list_params.RegionListParams, ), @@ -113,8 +114,8 @@ def with_streaming_response(self) -> AsyncRegionsResourceWithStreamingResponse: async def list( self, *, - serves_batch: bool | NotGiven = NOT_GIVEN, - serves_inference: bool | NotGiven = NOT_GIVEN, + page: int | NotGiven = NOT_GIVEN, + per_page: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -123,12 +124,15 @@ async def list( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> RegionListResponse: """ - To list all datacenter regions, send a GET request to `/v2/gen-ai/regions`. + To list all of the regions that are available, send a GET request to + `/v2/regions`. The response will be a JSON object with a key called `regions`. + The value of this will be an array of `region` objects, each of which will + contain the standard region attributes. Args: - serves_batch: include datacenters that are capable of running batch jobs. 
+ page: Which 'page' of paginated results to return. - serves_inference: include datacenters that serve inference. + per_page: Number of items returned per page extra_headers: Send extra headers @@ -139,9 +143,7 @@ async def list( timeout: Override the client-level default timeout for this request, in seconds """ return await self._get( - "/v2/gen-ai/regions" - if self._client._base_url_overridden - else "https://api.digitalocean.com/v2/gen-ai/regions", + "/v2/regions" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/regions", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, @@ -149,8 +151,8 @@ async def list( timeout=timeout, query=await async_maybe_transform( { - "serves_batch": serves_batch, - "serves_inference": serves_inference, + "page": page, + "per_page": per_page, }, region_list_params.RegionListParams, ), diff --git a/src/gradientai/types/__init__.py b/src/gradientai/types/__init__.py index c8144381..20747fb3 100644 --- a/src/gradientai/types/__init__.py +++ b/src/gradientai/types/__init__.py @@ -3,9 +3,32 @@ from __future__ import annotations from .shared import ( + Size as Size, + Image as Image, + Action as Action, + Kernel as Kernel, + Region as Region, APIMeta as APIMeta, + Droplet as Droplet, + GPUInfo as GPUInfo, APILinks as APILinks, + DiskInfo as DiskInfo, + NetworkV4 as NetworkV4, + NetworkV6 as NetworkV6, + PageLinks as PageLinks, + Snapshots as Snapshots, + ActionLink as ActionLink, + VpcPeering as VpcPeering, + ForwardLinks as ForwardLinks, + Subscription as Subscription, + BackwardLinks as BackwardLinks, + MetaProperties as MetaProperties, + CompletionUsage as CompletionUsage, + GarbageCollection as GarbageCollection, + FirewallRuleTarget as FirewallRuleTarget, ChatCompletionChunk as ChatCompletionChunk, + SubscriptionTierBase as SubscriptionTierBase, + DropletNextBackupWindow as DropletNextBackupWindow, ChatCompletionTokenLogprob as ChatCompletionTokenLogprob, ) from .api_agent 
import APIAgent as APIAgent @@ -15,7 +38,6 @@ from .api_agent_model import APIAgentModel as APIAgentModel from .agent_list_params import AgentListParams as AgentListParams from .api_model_version import APIModelVersion as APIModelVersion -from .model_list_params import ModelListParams as ModelListParams from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase from .region_list_params import RegionListParams as RegionListParams from .agent_create_params import AgentCreateParams as AgentCreateParams @@ -27,18 +49,34 @@ from .agent_create_response import AgentCreateResponse as AgentCreateResponse from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse from .agent_update_response import AgentUpdateResponse as AgentUpdateResponse +from .droplet_backup_policy import DropletBackupPolicy as DropletBackupPolicy from .api_agent_api_key_info import APIAgentAPIKeyInfo as APIAgentAPIKeyInfo from .agent_retrieve_response import AgentRetrieveResponse as AgentRetrieveResponse from .api_openai_api_key_info import APIOpenAIAPIKeyInfo as APIOpenAIAPIKeyInfo +from .gpu_droplet_list_params import GPUDropletListParams as GPUDropletListParams +from .model_retrieve_response import ModelRetrieveResponse as ModelRetrieveResponse from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility +from .gpu_droplet_create_params import GPUDropletCreateParams as GPUDropletCreateParams +from .gpu_droplet_list_response import GPUDropletListResponse as GPUDropletListResponse from .agent_update_status_params import AgentUpdateStatusParams as AgentUpdateStatusParams from .api_anthropic_api_key_info import APIAnthropicAPIKeyInfo as APIAnthropicAPIKeyInfo from .knowledge_base_list_params import KnowledgeBaseListParams as KnowledgeBaseListParams +from .droplet_backup_policy_param import DropletBackupPolicyParam as DropletBackupPolicyParam +from .gpu_droplet_create_response import GPUDropletCreateResponse as GPUDropletCreateResponse from 
.agent_update_status_response import AgentUpdateStatusResponse as AgentUpdateStatusResponse from .knowledge_base_create_params import KnowledgeBaseCreateParams as KnowledgeBaseCreateParams from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams +from .gpu_droplet_retrieve_response import GPUDropletRetrieveResponse as GPUDropletRetrieveResponse from .knowledge_base_create_response import KnowledgeBaseCreateResponse as KnowledgeBaseCreateResponse from .knowledge_base_delete_response import KnowledgeBaseDeleteResponse as KnowledgeBaseDeleteResponse from .knowledge_base_update_response import KnowledgeBaseUpdateResponse as KnowledgeBaseUpdateResponse +from .gpu_droplet_list_kernels_params import GPUDropletListKernelsParams as GPUDropletListKernelsParams +from .gpu_droplet_delete_by_tag_params import GPUDropletDeleteByTagParams as GPUDropletDeleteByTagParams from .knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse as KnowledgeBaseRetrieveResponse +from .gpu_droplet_list_firewalls_params import GPUDropletListFirewallsParams as GPUDropletListFirewallsParams +from .gpu_droplet_list_kernels_response import GPUDropletListKernelsResponse as GPUDropletListKernelsResponse +from .gpu_droplet_list_snapshots_params import GPUDropletListSnapshotsParams as GPUDropletListSnapshotsParams +from .gpu_droplet_list_firewalls_response import GPUDropletListFirewallsResponse as GPUDropletListFirewallsResponse +from .gpu_droplet_list_neighbors_response import GPUDropletListNeighborsResponse as GPUDropletListNeighborsResponse +from .gpu_droplet_list_snapshots_response import GPUDropletListSnapshotsResponse as GPUDropletListSnapshotsResponse diff --git a/src/gradientai/types/agent_create_params.py b/src/gradientai/types/agent_create_params.py index 58b99df7..68ebd227 100644 --- a/src/gradientai/types/agent_create_params.py +++ 
b/src/gradientai/types/agent_create_params.py @@ -12,8 +12,10 @@ class AgentCreateParams(TypedDict, total=False): anthropic_key_uuid: str + """Optional Anthropic API key ID to use with Anthropic models""" description: str + """A text description of the agent, not used in inference""" instruction: str """Agent instruction. @@ -24,16 +26,22 @@ class AgentCreateParams(TypedDict, total=False): """ knowledge_base_uuid: List[str] + """Ids of the knowledge base(s) to attach to the agent""" model_uuid: str """Identifier for the foundation model.""" name: str + """Agent name""" openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")] + """Optional OpenAI API key ID to use with OpenAI models""" project_id: str + """The id of the DigitalOcean project this agent will belong to""" region: str + """The DigitalOcean region to deploy your agent in""" tags: List[str] + """Agent tag to organize related resources""" diff --git a/src/gradientai/types/agent_create_response.py b/src/gradientai/types/agent_create_response.py index 48545fe9..edd48b7d 100644 --- a/src/gradientai/types/agent_create_response.py +++ b/src/gradientai/types/agent_create_response.py @@ -11,6 +11,7 @@ class AgentCreateResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_delete_response.py b/src/gradientai/types/agent_delete_response.py index eb1d440d..8c2b2e14 100644 --- a/src/gradientai/types/agent_delete_response.py +++ b/src/gradientai/types/agent_delete_response.py @@ -11,6 +11,7 @@ class AgentDeleteResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_list_params.py b/src/gradientai/types/agent_list_params.py index e13a10c9..b56d0395 100644 --- a/src/gradientai/types/agent_list_params.py +++ b/src/gradientai/types/agent_list_params.py @@ -9,10 +9,10 @@ class AgentListParams(TypedDict, total=False): only_deployed: 
bool - """only list agents that are deployed.""" + """Only list agents that are deployed.""" page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/agent_list_response.py b/src/gradientai/types/agent_list_response.py index 397d9fd2..7a64c66e 100644 --- a/src/gradientai/types/agent_list_response.py +++ b/src/gradientai/types/agent_list_response.py @@ -29,6 +29,7 @@ class AgentChatbot(BaseModel): logo: Optional[str] = None name: Optional[str] = None + """Name of chatbot""" primary_color: Optional[str] = None @@ -39,12 +40,15 @@ class AgentChatbot(BaseModel): class AgentChatbotIdentifier(BaseModel): agent_chatbot_identifier: Optional[str] = None + """Agent chatbot identifier""" class AgentDeployment(BaseModel): created_at: Optional[datetime] = None + """Creation date / time""" name: Optional[str] = None + """Name""" status: Optional[ Literal[ @@ -61,70 +65,112 @@ class AgentDeployment(BaseModel): ] = None updated_at: Optional[datetime] = None + """Last modified""" url: Optional[str] = None + """Access your deployed agent here""" uuid: Optional[str] = None + """Unique id""" visibility: Optional[APIDeploymentVisibility] = None + """ + - VISIBILITY_UNKNOWN: The status of the deployment is unknown + - VISIBILITY_DISABLED: The deployment is disabled and will no longer service + requests + - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state + - VISIBILITY_PUBLIC: The deployment is public and will service requests from the + public internet + - VISIBILITY_PRIVATE: The deployment is private and will only service requests + from other agents, or through API keys + """ class AgentTemplateGuardrail(BaseModel): priority: Optional[int] = None + """Priority of the guardrail""" uuid: Optional[str] = None + """Uuid of the guardrail""" class AgentTemplate(BaseModel): created_at: Optional[datetime] = None + """The agent template's creation date""" description: Optional[str] = None + 
"""Deprecated - Use summary instead""" guardrails: Optional[List[AgentTemplateGuardrail]] = None + """List of guardrails associated with the agent template""" instruction: Optional[str] = None + """Instructions for the agent template""" k: Optional[int] = None + """The 'k' value for the agent template""" knowledge_bases: Optional[List[APIKnowledgeBase]] = None + """List of knowledge bases associated with the agent template""" long_description: Optional[str] = None + """The long description of the agent template""" max_tokens: Optional[int] = None + """The max_tokens setting for the agent template""" model: Optional[APIAgentModel] = None + """Description of a Model""" name: Optional[str] = None + """Name of the agent template""" short_description: Optional[str] = None + """The short description of the agent template""" summary: Optional[str] = None + """The summary of the agent template""" tags: Optional[List[str]] = None + """List of tags associated with the agent template""" temperature: Optional[float] = None + """The temperature setting for the agent template""" template_type: Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None + """ + - AGENT_TEMPLATE_TYPE_STANDARD: The standard agent template + - AGENT_TEMPLATE_TYPE_ONE_CLICK: The one click agent template + """ top_p: Optional[float] = None + """The top_p setting for the agent template""" updated_at: Optional[datetime] = None + """The agent template's last updated date""" uuid: Optional[str] = None + """Unique id""" class Agent(BaseModel): chatbot: Optional[AgentChatbot] = None + """A Chatbot""" chatbot_identifiers: Optional[List[AgentChatbotIdentifier]] = None + """Chatbot identifiers""" created_at: Optional[datetime] = None + """Creation date / time""" deployment: Optional[AgentDeployment] = None + """Description of deployment""" description: Optional[str] = None + """Description of agent""" if_case: Optional[str] = None + """Instructions to the agent on how to use the 
route""" instruction: Optional[str] = None """Agent instruction. @@ -135,6 +181,7 @@ class Agent(BaseModel): """ k: Optional[int] = None + """How many results should be considered from an attached knowledge base""" max_tokens: Optional[int] = None """ @@ -144,26 +191,43 @@ class Agent(BaseModel): """ model: Optional[APIAgentModel] = None + """Description of a Model""" name: Optional[str] = None + """Agent name""" project_id: Optional[str] = None + """The DigitalOcean project ID associated with the agent""" provide_citations: Optional[bool] = None + """Whether the agent should provide in-response citations""" region: Optional[str] = None + """Region code""" retrieval_method: Optional[APIRetrievalMethod] = None + """ + - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown + - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite + - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back + - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries + - RETRIEVAL_METHOD_NONE: The retrieval method is none + """ route_created_at: Optional[datetime] = None + """Creation of route date / time""" route_created_by: Optional[str] = None + """Id of user that created the route""" route_name: Optional[str] = None + """Route name""" route_uuid: Optional[str] = None + """Route uuid""" tags: Optional[List[str]] = None + """A set of abitrary tags to organize your agent""" temperature: Optional[float] = None """Controls the model’s creativity, specified as a number between 0 and 1. 
@@ -173,6 +237,7 @@ class Agent(BaseModel): """ template: Optional[AgentTemplate] = None + """Represents an AgentTemplate entity""" top_p: Optional[float] = None """ @@ -182,17 +247,27 @@ class Agent(BaseModel): """ updated_at: Optional[datetime] = None + """Last modified""" url: Optional[str] = None + """Access your agent under this url""" user_id: Optional[str] = None + """Id of user that created the agent""" uuid: Optional[str] = None + """Unique agent id""" + + version_hash: Optional[str] = None + """The latest version of the agent""" class AgentListResponse(BaseModel): agents: Optional[List[Agent]] = None + """Agents""" links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/gradientai/types/agent_retrieve_response.py b/src/gradientai/types/agent_retrieve_response.py index 2eed88af..2836558b 100644 --- a/src/gradientai/types/agent_retrieve_response.py +++ b/src/gradientai/types/agent_retrieve_response.py @@ -11,6 +11,7 @@ class AgentRetrieveResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_update_params.py b/src/gradientai/types/agent_update_params.py index 85f9a9c2..5d2b5597 100644 --- a/src/gradientai/types/agent_update_params.py +++ b/src/gradientai/types/agent_update_params.py @@ -13,8 +13,13 @@ class AgentUpdateParams(TypedDict, total=False): anthropic_key_uuid: str + """Optional anthropic key uuid for use with anthropic models""" + + conversation_logs_enabled: bool + """Optional update of conversation logs enabled""" description: str + """Agent description""" instruction: str """Agent instruction. 
@@ -25,6 +30,7 @@ class AgentUpdateParams(TypedDict, total=False): """ k: int + """How many results should be considered from an attached knowledge base""" max_tokens: int """ @@ -37,16 +43,27 @@ class AgentUpdateParams(TypedDict, total=False): """Identifier for the foundation model.""" name: str + """Agent name""" openai_key_uuid: Annotated[str, PropertyInfo(alias="open_ai_key_uuid")] + """Optional OpenAI key uuid for use with OpenAI models""" project_id: str + """The id of the DigitalOcean project this agent will belong to""" provide_citations: bool retrieval_method: APIRetrievalMethod + """ + - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown + - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite + - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back + - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries + - RETRIEVAL_METHOD_NONE: The retrieval method is none + """ tags: List[str] + """A set of abitrary tags to organize your agent""" temperature: float """Controls the model’s creativity, specified as a number between 0 and 1. 
@@ -63,3 +80,4 @@ class AgentUpdateParams(TypedDict, total=False): """ body_uuid: Annotated[str, PropertyInfo(alias="uuid")] + """Unique agent id""" diff --git a/src/gradientai/types/agent_update_response.py b/src/gradientai/types/agent_update_response.py index 2948aa1c..1976089b 100644 --- a/src/gradientai/types/agent_update_response.py +++ b/src/gradientai/types/agent_update_response.py @@ -11,6 +11,7 @@ class AgentUpdateResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from .api_agent import APIAgent diff --git a/src/gradientai/types/agent_update_status_params.py b/src/gradientai/types/agent_update_status_params.py index a0cdc0b9..3f16fdc2 100644 --- a/src/gradientai/types/agent_update_status_params.py +++ b/src/gradientai/types/agent_update_status_params.py @@ -12,5 +12,16 @@ class AgentUpdateStatusParams(TypedDict, total=False): body_uuid: Annotated[str, PropertyInfo(alias="uuid")] + """Unique id""" visibility: APIDeploymentVisibility + """ + - VISIBILITY_UNKNOWN: The status of the deployment is unknown + - VISIBILITY_DISABLED: The deployment is disabled and will no longer service + requests + - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state + - VISIBILITY_PUBLIC: The deployment is public and will service requests from the + public internet + - VISIBILITY_PRIVATE: The deployment is private and will only service requests + from other agents, or through API keys + """ diff --git a/src/gradientai/types/agent_update_status_response.py b/src/gradientai/types/agent_update_status_response.py index b200f99d..84457d85 100644 --- a/src/gradientai/types/agent_update_status_response.py +++ b/src/gradientai/types/agent_update_status_response.py @@ -11,6 +11,7 @@ class AgentUpdateStatusResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from .api_agent import APIAgent diff --git a/src/gradientai/types/agents/__init__.py b/src/gradientai/types/agents/__init__.py index 9c6508f6..39b82ebc 100644 --- 
a/src/gradientai/types/agents/__init__.py +++ b/src/gradientai/types/agents/__init__.py @@ -40,6 +40,7 @@ from .evaluation_dataset_create_params import EvaluationDatasetCreateParams as EvaluationDatasetCreateParams from .evaluation_run_retrieve_response import EvaluationRunRetrieveResponse as EvaluationRunRetrieveResponse from .evaluation_dataset_create_response import EvaluationDatasetCreateResponse as EvaluationDatasetCreateResponse +from .evaluation_run_list_results_params import EvaluationRunListResultsParams as EvaluationRunListResultsParams from .evaluation_test_case_create_params import EvaluationTestCaseCreateParams as EvaluationTestCaseCreateParams from .evaluation_test_case_list_response import EvaluationTestCaseListResponse as EvaluationTestCaseListResponse from .evaluation_test_case_update_params import EvaluationTestCaseUpdateParams as EvaluationTestCaseUpdateParams @@ -47,9 +48,15 @@ from .evaluation_test_case_create_response import EvaluationTestCaseCreateResponse as EvaluationTestCaseCreateResponse from .evaluation_test_case_retrieve_params import EvaluationTestCaseRetrieveParams as EvaluationTestCaseRetrieveParams from .evaluation_test_case_update_response import EvaluationTestCaseUpdateResponse as EvaluationTestCaseUpdateResponse +from .evaluation_metric_list_regions_params import ( + EvaluationMetricListRegionsParams as EvaluationMetricListRegionsParams, +) from .evaluation_test_case_retrieve_response import ( EvaluationTestCaseRetrieveResponse as EvaluationTestCaseRetrieveResponse, ) +from .evaluation_metric_list_regions_response import ( + EvaluationMetricListRegionsResponse as EvaluationMetricListRegionsResponse, +) from .evaluation_run_retrieve_results_response import ( EvaluationRunRetrieveResultsResponse as EvaluationRunRetrieveResultsResponse, ) diff --git a/src/gradientai/types/agents/api_evaluation_metric.py b/src/gradientai/types/agents/api_evaluation_metric.py index 1aa85306..2d3b4194 100644 --- 
a/src/gradientai/types/agents/api_evaluation_metric.py +++ b/src/gradientai/types/agents/api_evaluation_metric.py @@ -11,6 +11,9 @@ class APIEvaluationMetric(BaseModel): description: Optional[str] = None + inverted: Optional[bool] = None + """If true, the metric is inverted, meaning that a lower value is better.""" + metric_name: Optional[str] = None metric_type: Optional[ @@ -20,5 +23,16 @@ class APIEvaluationMetric(BaseModel): metric_uuid: Optional[str] = None metric_value_type: Optional[ - Literal["METRIC_VALUE_TYPE_UNSPECIFIED", "METRIC_VALUE_TYPE_NUMBER", "METRIC_VALUE_TYPE_STRING"] + Literal[ + "METRIC_VALUE_TYPE_UNSPECIFIED", + "METRIC_VALUE_TYPE_NUMBER", + "METRIC_VALUE_TYPE_STRING", + "METRIC_VALUE_TYPE_PERCENTAGE", + ] ] = None + + range_max: Optional[float] = None + """The maximum value for the metric.""" + + range_min: Optional[float] = None + """The minimum value for the metric.""" diff --git a/src/gradientai/types/agents/api_evaluation_metric_result.py b/src/gradientai/types/agents/api_evaluation_metric_result.py index 35146c00..3d6ea84f 100644 --- a/src/gradientai/types/agents/api_evaluation_metric_result.py +++ b/src/gradientai/types/agents/api_evaluation_metric_result.py @@ -1,6 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
from typing import Optional +from typing_extensions import Literal from ..._models import BaseModel @@ -8,10 +9,26 @@ class APIEvaluationMetricResult(BaseModel): + error_description: Optional[str] = None + """Error description if the metric could not be calculated.""" + metric_name: Optional[str] = None + """Metric name""" + + metric_value_type: Optional[ + Literal[ + "METRIC_VALUE_TYPE_UNSPECIFIED", + "METRIC_VALUE_TYPE_NUMBER", + "METRIC_VALUE_TYPE_STRING", + "METRIC_VALUE_TYPE_PERCENTAGE", + ] + ] = None number_value: Optional[float] = None """The value of the metric as a number.""" + reasoning: Optional[str] = None + """Reasoning of the metric result.""" + string_value: Optional[str] = None """The value of the metric as a string.""" diff --git a/src/gradientai/types/agents/api_evaluation_prompt.py b/src/gradientai/types/agents/api_evaluation_prompt.py index 750e62fb..7471e9ae 100644 --- a/src/gradientai/types/agents/api_evaluation_prompt.py +++ b/src/gradientai/types/agents/api_evaluation_prompt.py @@ -31,12 +31,19 @@ class APIEvaluationPrompt(BaseModel): input: Optional[str] = None + input_tokens: Optional[str] = None + """The number of input tokens used in the prompt.""" + output: Optional[str] = None + output_tokens: Optional[str] = None + """The number of output tokens used in the prompt.""" + prompt_chunks: Optional[List[PromptChunk]] = None """The list of prompt chunks.""" prompt_id: Optional[int] = None + """Prompt ID""" prompt_level_metric_results: Optional[List[APIEvaluationMetricResult]] = None """The metric results for the prompt.""" diff --git a/src/gradientai/types/agents/api_evaluation_run.py b/src/gradientai/types/agents/api_evaluation_run.py index b879f756..5a758898 100644 --- a/src/gradientai/types/agents/api_evaluation_run.py +++ b/src/gradientai/types/agents/api_evaluation_run.py @@ -12,31 +12,42 @@ class APIEvaluationRun(BaseModel): agent_deleted: Optional[bool] = None + """Whether agent is deleted""" agent_name: Optional[str] = None + 
"""Agent name""" agent_uuid: Optional[str] = None """Agent UUID.""" agent_version_hash: Optional[str] = None + """Version hash""" agent_workspace_uuid: Optional[str] = None + """Agent workspace uuid""" created_by_user_email: Optional[str] = None created_by_user_id: Optional[str] = None error_description: Optional[str] = None + """The error description""" evaluation_run_uuid: Optional[str] = None """Evaluation run UUID.""" + evaluation_test_case_workspace_uuid: Optional[str] = None + """Evaluation test case workspace uuid""" + finished_at: Optional[datetime] = None """Run end time.""" pass_status: Optional[bool] = None """The pass status of the evaluation run based on the star metric.""" + queued_at: Optional[datetime] = None + """Run queued time.""" + run_level_metric_results: Optional[List[APIEvaluationMetricResult]] = None run_name: Optional[str] = None @@ -60,6 +71,13 @@ class APIEvaluationRun(BaseModel): "EVALUATION_RUN_FAILED", ] ] = None + """Evaluation Run Statuses""" + + test_case_description: Optional[str] = None + """Test case description.""" + + test_case_name: Optional[str] = None + """Test case name.""" test_case_uuid: Optional[str] = None """Test-case UUID.""" diff --git a/src/gradientai/types/agents/api_evaluation_test_case.py b/src/gradientai/types/agents/api_evaluation_test_case.py index 09ce5e48..dc4c55f0 100644 --- a/src/gradientai/types/agents/api_evaluation_test_case.py +++ b/src/gradientai/types/agents/api_evaluation_test_case.py @@ -7,7 +7,27 @@ from .api_star_metric import APIStarMetric from .api_evaluation_metric import APIEvaluationMetric -__all__ = ["APIEvaluationTestCase"] +__all__ = ["APIEvaluationTestCase", "Dataset"] + + +class Dataset(BaseModel): + created_at: Optional[datetime] = None + """Time created at.""" + + dataset_name: Optional[str] = None + """Name of the dataset.""" + + dataset_uuid: Optional[str] = None + """UUID of the dataset.""" + + file_size: Optional[str] = None + """The size of the dataset uploaded file in bytes.""" 
+ + has_ground_truth: Optional[bool] = None + """Does the dataset have a ground truth column?""" + + row_count: Optional[int] = None + """Number of rows in the dataset.""" class APIEvaluationTestCase(BaseModel): @@ -19,6 +39,8 @@ class APIEvaluationTestCase(BaseModel): created_by_user_id: Optional[str] = None + dataset: Optional[Dataset] = None + dataset_name: Optional[str] = None dataset_uuid: Optional[str] = None diff --git a/src/gradientai/types/agents/api_key_create_params.py b/src/gradientai/types/agents/api_key_create_params.py index c3fc44cd..184c330c 100644 --- a/src/gradientai/types/agents/api_key_create_params.py +++ b/src/gradientai/types/agents/api_key_create_params.py @@ -11,5 +11,7 @@ class APIKeyCreateParams(TypedDict, total=False): body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + """Agent id""" name: str + """A human friendly name to identify the key""" diff --git a/src/gradientai/types/agents/api_key_create_response.py b/src/gradientai/types/agents/api_key_create_response.py index 09689fe7..ed8906c8 100644 --- a/src/gradientai/types/agents/api_key_create_response.py +++ b/src/gradientai/types/agents/api_key_create_response.py @@ -10,3 +10,4 @@ class APIKeyCreateResponse(BaseModel): api_key_info: Optional[APIAgentAPIKeyInfo] = None + """Agent API Key Info""" diff --git a/src/gradientai/types/agents/api_key_delete_response.py b/src/gradientai/types/agents/api_key_delete_response.py index 02b03f61..1f38c52e 100644 --- a/src/gradientai/types/agents/api_key_delete_response.py +++ b/src/gradientai/types/agents/api_key_delete_response.py @@ -10,3 +10,4 @@ class APIKeyDeleteResponse(BaseModel): api_key_info: Optional[APIAgentAPIKeyInfo] = None + """Agent API Key Info""" diff --git a/src/gradientai/types/agents/api_key_list_params.py b/src/gradientai/types/agents/api_key_list_params.py index 11da9398..1f8f96b7 100644 --- a/src/gradientai/types/agents/api_key_list_params.py +++ b/src/gradientai/types/agents/api_key_list_params.py @@ -9,7 
+9,7 @@ class APIKeyListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/agents/api_key_list_response.py b/src/gradientai/types/agents/api_key_list_response.py index aedb88ca..0040e91c 100644 --- a/src/gradientai/types/agents/api_key_list_response.py +++ b/src/gradientai/types/agents/api_key_list_response.py @@ -12,7 +12,10 @@ class APIKeyListResponse(BaseModel): api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None + """Api key infos""" links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/gradientai/types/agents/api_key_regenerate_response.py b/src/gradientai/types/agents/api_key_regenerate_response.py index ea2f761e..400140fb 100644 --- a/src/gradientai/types/agents/api_key_regenerate_response.py +++ b/src/gradientai/types/agents/api_key_regenerate_response.py @@ -10,3 +10,4 @@ class APIKeyRegenerateResponse(BaseModel): api_key_info: Optional[APIAgentAPIKeyInfo] = None + """Agent API Key Info""" diff --git a/src/gradientai/types/agents/api_key_update_params.py b/src/gradientai/types/agents/api_key_update_params.py index b49ebb38..ba997a2f 100644 --- a/src/gradientai/types/agents/api_key_update_params.py +++ b/src/gradientai/types/agents/api_key_update_params.py @@ -13,7 +13,10 @@ class APIKeyUpdateParams(TypedDict, total=False): path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]] body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + """Agent id""" body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + """API key ID""" name: str + """Name""" diff --git a/src/gradientai/types/agents/api_key_update_response.py b/src/gradientai/types/agents/api_key_update_response.py index 87442329..56154b16 100644 --- a/src/gradientai/types/agents/api_key_update_response.py +++ 
b/src/gradientai/types/agents/api_key_update_response.py @@ -10,3 +10,4 @@ class APIKeyUpdateResponse(BaseModel): api_key_info: Optional[APIAgentAPIKeyInfo] = None + """Agent API Key Info""" diff --git a/src/gradientai/types/agents/api_link_knowledge_base_output.py b/src/gradientai/types/agents/api_link_knowledge_base_output.py index a38f021b..2e7cec1e 100644 --- a/src/gradientai/types/agents/api_link_knowledge_base_output.py +++ b/src/gradientai/types/agents/api_link_knowledge_base_output.py @@ -11,6 +11,7 @@ class APILinkKnowledgeBaseOutput(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/api_star_metric.py b/src/gradientai/types/agents/api_star_metric.py index c9ecc60a..0d04dea9 100644 --- a/src/gradientai/types/agents/api_star_metric.py +++ b/src/gradientai/types/agents/api_star_metric.py @@ -12,6 +12,12 @@ class APIStarMetric(BaseModel): name: Optional[str] = None + success_threshold: Optional[float] = None + """ + The success threshold for the star metric. This is a value that the metric must + reach to be considered successful. + """ + success_threshold_pct: Optional[int] = None """ The success threshold for the star metric. This is a percentage value between 0 diff --git a/src/gradientai/types/agents/api_star_metric_param.py b/src/gradientai/types/agents/api_star_metric_param.py index 5f7b2fd9..781fb2b1 100644 --- a/src/gradientai/types/agents/api_star_metric_param.py +++ b/src/gradientai/types/agents/api_star_metric_param.py @@ -12,6 +12,12 @@ class APIStarMetricParam(TypedDict, total=False): name: str + success_threshold: float + """ + The success threshold for the star metric. This is a value that the metric must + reach to be considered successful. + """ + success_threshold_pct: int """ The success threshold for the star metric. 
This is a percentage value between 0 diff --git a/src/gradientai/types/agents/chat/completion_create_params.py b/src/gradientai/types/agents/chat/completion_create_params.py index ec5c6b70..aaec2ba5 100644 --- a/src/gradientai/types/agents/chat/completion_create_params.py +++ b/src/gradientai/types/agents/chat/completion_create_params.py @@ -12,7 +12,15 @@ "MessageChatCompletionRequestDeveloperMessage", "MessageChatCompletionRequestUserMessage", "MessageChatCompletionRequestAssistantMessage", + "MessageChatCompletionRequestAssistantMessageToolCall", + "MessageChatCompletionRequestAssistantMessageToolCallFunction", + "MessageChatCompletionRequestToolMessage", "StreamOptions", + "ToolChoice", + "ToolChoiceChatCompletionNamedToolChoice", + "ToolChoiceChatCompletionNamedToolChoiceFunction", + "Tool", + "ToolFunction", "CompletionCreateParamsNonStreaming", "CompletionCreateParamsStreaming", ] @@ -105,6 +113,25 @@ class CompletionCreateParamsBase(TypedDict, total=False): this or `top_p` but not both. """ + tool_choice: ToolChoice + """ + Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + """ + + tools: Iterable[Tool] + """A list of tools the model may call. + + Currently, only functions are supported as a tool. 
+ """ + top_logprobs: Optional[int] """ An integer between 0 and 20 specifying the number of most likely tokens to @@ -152,6 +179,30 @@ class MessageChatCompletionRequestUserMessage(TypedDict, total=False): """The role of the messages author, in this case `user`.""" +class MessageChatCompletionRequestAssistantMessageToolCallFunction(TypedDict, total=False): + arguments: Required[str] + """ + The arguments to call the function with, as generated by the model in JSON + format. Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. Validate the + arguments in your code before calling your function. + """ + + name: Required[str] + """The name of the function to call.""" + + +class MessageChatCompletionRequestAssistantMessageToolCall(TypedDict, total=False): + id: Required[str] + """The ID of the tool call.""" + + function: Required[MessageChatCompletionRequestAssistantMessageToolCallFunction] + """The function that the model called.""" + + type: Required[Literal["function"]] + """The type of the tool. 
Currently, only `function` is supported.""" + + class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" @@ -159,12 +210,27 @@ class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): content: Union[str, List[str], None] """The contents of the assistant message.""" + tool_calls: Iterable[MessageChatCompletionRequestAssistantMessageToolCall] + """The tool calls generated by the model, such as function calls.""" + + +class MessageChatCompletionRequestToolMessage(TypedDict, total=False): + content: Required[str] + """The contents of the tool message.""" + + role: Required[Literal["tool"]] + """The role of the messages author, in this case `tool`.""" + + tool_call_id: Required[str] + """Tool call that this message is responding to.""" + Message: TypeAlias = Union[ MessageChatCompletionRequestSystemMessage, MessageChatCompletionRequestDeveloperMessage, MessageChatCompletionRequestUserMessage, MessageChatCompletionRequestAssistantMessage, + MessageChatCompletionRequestToolMessage, ] @@ -181,6 +247,53 @@ class StreamOptions(TypedDict, total=False): """ +class ToolChoiceChatCompletionNamedToolChoiceFunction(TypedDict, total=False): + name: Required[str] + """The name of the function to call.""" + + +class ToolChoiceChatCompletionNamedToolChoice(TypedDict, total=False): + function: Required[ToolChoiceChatCompletionNamedToolChoiceFunction] + + type: Required[Literal["function"]] + """The type of the tool. Currently, only `function` is supported.""" + + +ToolChoice: TypeAlias = Union[Literal["none", "auto", "required"], ToolChoiceChatCompletionNamedToolChoice] + + +class ToolFunction(TypedDict, total=False): + name: Required[str] + """The name of the function to be called. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. 
+ """ + + description: str + """ + A description of what the function does, used by the model to choose when and + how to call the function. + """ + + parameters: Dict[str, object] + """The parameters the functions accepts, described as a JSON Schema object. + + See the [guide](/docs/guides/function-calling) for examples, and the + [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + documentation about the format. + + Omitting `parameters` defines a function with an empty parameter list. + """ + + +class Tool(TypedDict, total=False): + function: Required[ToolFunction] + + type: Required[Literal["function"]] + """The type of the tool. Currently, only `function` is supported.""" + + class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False): stream: Optional[Literal[False]] """ diff --git a/src/gradientai/types/agents/chat/completion_create_response.py b/src/gradientai/types/agents/chat/completion_create_response.py index f2860c31..4c839ded 100644 --- a/src/gradientai/types/agents/chat/completion_create_response.py +++ b/src/gradientai/types/agents/chat/completion_create_response.py @@ -4,9 +4,17 @@ from typing_extensions import Literal from ...._models import BaseModel +from ...shared.completion_usage import CompletionUsage from ...shared.chat_completion_token_logprob import ChatCompletionTokenLogprob -__all__ = ["CompletionCreateResponse", "Choice", "ChoiceLogprobs", "ChoiceMessage", "Usage"] +__all__ = [ + "CompletionCreateResponse", + "Choice", + "ChoiceLogprobs", + "ChoiceMessage", + "ChoiceMessageToolCall", + "ChoiceMessageToolCallFunction", +] class ChoiceLogprobs(BaseModel): @@ -17,6 +25,30 @@ class ChoiceLogprobs(BaseModel): """A list of message refusal tokens with log probability information.""" +class ChoiceMessageToolCallFunction(BaseModel): + arguments: str + """ + The arguments to call the function with, as generated by the model in JSON + format. 
Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. Validate the + arguments in your code before calling your function. + """ + + name: str + """The name of the function to call.""" + + +class ChoiceMessageToolCall(BaseModel): + id: str + """The ID of the tool call.""" + + function: ChoiceMessageToolCallFunction + """The function that the model called.""" + + type: Literal["function"] + """The type of the tool. Currently, only `function` is supported.""" + + class ChoiceMessage(BaseModel): content: Optional[str] = None """The contents of the message.""" @@ -27,14 +59,17 @@ class ChoiceMessage(BaseModel): role: Literal["assistant"] """The role of the author of this message.""" + tool_calls: Optional[List[ChoiceMessageToolCall]] = None + """The tool calls generated by the model, such as function calls.""" + class Choice(BaseModel): - finish_reason: Literal["stop", "length"] + finish_reason: Literal["stop", "length", "tool_calls"] """The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, or `length` if the maximum number of tokens specified in the request - was reached. + was reached, `tool_calls` if the model called a tool. 
""" index: int @@ -47,17 +82,6 @@ class Choice(BaseModel): """A chat completion message generated by the model.""" -class Usage(BaseModel): - completion_tokens: int - """Number of tokens in the generated completion.""" - - prompt_tokens: int - """Number of tokens in the prompt.""" - - total_tokens: int - """Total number of tokens used in the request (prompt + completion).""" - - class CompletionCreateResponse(BaseModel): id: str """A unique identifier for the chat completion.""" @@ -77,5 +101,5 @@ class CompletionCreateResponse(BaseModel): object: Literal["chat.completion"] """The object type, which is always `chat.completion`.""" - usage: Optional[Usage] = None + usage: Optional[CompletionUsage] = None """Usage statistics for the completion request.""" diff --git a/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py index 6aa6d27a..9a4000c0 100644 --- a/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py +++ b/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py @@ -15,6 +15,7 @@ class EvaluationDatasetCreateFileUploadPresignedURLsParams(TypedDict, total=Fals class File(TypedDict, total=False): file_name: str + """Local filename""" file_size: str """The size of the file in bytes.""" diff --git a/src/gradientai/types/agents/evaluation_metric_list_regions_params.py b/src/gradientai/types/agents/evaluation_metric_list_regions_params.py new file mode 100644 index 00000000..701e7d4e --- /dev/null +++ b/src/gradientai/types/agents/evaluation_metric_list_regions_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["EvaluationMetricListRegionsParams"] + + +class EvaluationMetricListRegionsParams(TypedDict, total=False): + serves_batch: bool + """Include datacenters that are capable of running batch jobs.""" + + serves_inference: bool + """Include datacenters that serve inference.""" diff --git a/src/gradientai/types/agents/evaluation_metric_list_regions_response.py b/src/gradientai/types/agents/evaluation_metric_list_regions_response.py new file mode 100644 index 00000000..7246d484 --- /dev/null +++ b/src/gradientai/types/agents/evaluation_metric_list_regions_response.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel + +__all__ = ["EvaluationMetricListRegionsResponse", "Region"] + + +class Region(BaseModel): + inference_url: Optional[str] = None + """Url for inference server""" + + region: Optional[str] = None + """Region code""" + + serves_batch: Optional[bool] = None + """This datacenter is capable of running batch jobs""" + + serves_inference: Optional[bool] = None + """This datacenter is capable of serving inference""" + + stream_inference_url: Optional[str] = None + """The url for the inference streaming server""" + + +class EvaluationMetricListRegionsResponse(BaseModel): + regions: Optional[List[Region]] = None + """Region code""" diff --git a/src/gradientai/types/agents/evaluation_metrics/__init__.py b/src/gradientai/types/agents/evaluation_metrics/__init__.py index 7af9b074..c349624b 100644 --- a/src/gradientai/types/agents/evaluation_metrics/__init__.py +++ b/src/gradientai/types/agents/evaluation_metrics/__init__.py @@ -2,6 +2,8 @@ from __future__ import annotations +from .model_list_params import ModelListParams as ModelListParams +from .model_list_response import ModelListResponse as ModelListResponse from .workspace_create_params import 
WorkspaceCreateParams as WorkspaceCreateParams from .workspace_list_response import WorkspaceListResponse as WorkspaceListResponse from .workspace_update_params import WorkspaceUpdateParams as WorkspaceUpdateParams diff --git a/src/gradientai/types/model_list_params.py b/src/gradientai/types/agents/evaluation_metrics/model_list_params.py similarity index 87% rename from src/gradientai/types/model_list_params.py rename to src/gradientai/types/agents/evaluation_metrics/model_list_params.py index 4abc1dc1..a2fa066a 100644 --- a/src/gradientai/types/model_list_params.py +++ b/src/gradientai/types/agents/evaluation_metrics/model_list_params.py @@ -10,13 +10,13 @@ class ModelListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" public_only: bool - """only include models that are publicly available.""" + """Only include models that are publicly available.""" usecases: List[ Literal[ @@ -29,7 +29,7 @@ class ModelListParams(TypedDict, total=False): "MODEL_USECASE_SERVERLESS", ] ] - """include only models defined for the listed usecases. + """Include only models defined for the listed usecases. - MODEL_USECASE_UNKNOWN: The use case of the model is unknown - MODEL_USECASE_AGENT: The model maybe used in an agent diff --git a/src/gradientai/types/agents/evaluation_metrics/model_list_response.py b/src/gradientai/types/agents/evaluation_metrics/model_list_response.py new file mode 100644 index 00000000..2fc17524 --- /dev/null +++ b/src/gradientai/types/agents/evaluation_metrics/model_list_response.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from ...._models import BaseModel +from ...api_model import APIModel +from ...shared.api_meta import APIMeta +from ...shared.api_links import APILinks + +__all__ = ["ModelListResponse"] + + +class ModelListResponse(BaseModel): + links: Optional[APILinks] = None + """Links to other pages""" + + meta: Optional[APIMeta] = None + """Meta information about the data set""" + + models: Optional[List[APIModel]] = None + """The models""" diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py b/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py index 73f390be..7a418e81 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py @@ -10,7 +10,10 @@ class WorkspaceCreateParams(TypedDict, total=False): agent_uuids: List[str] + """Ids of the agents(s) to attach to the workspace""" description: str + """Description of the workspace""" name: str + """Name of the workspace""" diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py index 1fe7b5a2..3e094515 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py @@ -9,3 +9,4 @@ class WorkspaceDeleteResponse(BaseModel): workspace_uuid: Optional[str] = None + """Workspace""" diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py b/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py index 64f9a63c..793623dd 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py @@ -11,6 +11,7 @@ class WorkspaceListResponse(BaseModel): workspaces: 
Optional[List["APIWorkspace"]] = None + """Workspaces""" from ...api_workspace import APIWorkspace diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py b/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py index fd09079e..d5906bd9 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py @@ -11,8 +11,10 @@ class WorkspaceUpdateParams(TypedDict, total=False): description: str + """The new description of the workspace""" name: str + """The new name of the workspace""" body_workspace_uuid: Annotated[str, PropertyInfo(alias="workspace_uuid")] """Workspace UUID.""" diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py index 277274ed..b56d0395 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py @@ -2,25 +2,17 @@ from __future__ import annotations -from typing import List from typing_extensions import TypedDict -__all__ = ["AgentListParams", "FieldMask"] +__all__ = ["AgentListParams"] class AgentListParams(TypedDict, total=False): - field_mask: FieldMask - only_deployed: bool """Only list agents that are deployed.""" page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" - - -class FieldMask(TypedDict, total=False): - paths: List[str] - """The set of field mask paths.""" + """Items per page.""" diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py index 1e520736..6f9ea948 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py +++ 
b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py @@ -15,8 +15,10 @@ class AgentListResponse(BaseModel): agents: Optional[List["APIAgent"]] = None links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" from ....api_agent import APIAgent diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py index 8e92503a..74e27dd2 100644 --- a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py +++ b/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py @@ -12,5 +12,7 @@ class AgentMoveParams(TypedDict, total=False): agent_uuids: List[str] + """Agent uuids""" body_workspace_uuid: Annotated[str, PropertyInfo(alias="workspace_uuid")] + """Workspace uuid to move agents to""" diff --git a/src/gradientai/types/agents/evaluation_run_create_params.py b/src/gradientai/types/agents/evaluation_run_create_params.py index 47bdabd6..3029e192 100644 --- a/src/gradientai/types/agents/evaluation_run_create_params.py +++ b/src/gradientai/types/agents/evaluation_run_create_params.py @@ -16,3 +16,4 @@ class EvaluationRunCreateParams(TypedDict, total=False): """The name of the run.""" test_case_uuid: str + """Test-case UUID to run""" diff --git a/src/gradientai/types/agents/evaluation_run_list_results_params.py b/src/gradientai/types/agents/evaluation_run_list_results_params.py new file mode 100644 index 00000000..bcf96c14 --- /dev/null +++ b/src/gradientai/types/agents/evaluation_run_list_results_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["EvaluationRunListResultsParams"] + + +class EvaluationRunListResultsParams(TypedDict, total=False): + page: int + """Page number.""" + + per_page: int + """Items per page.""" diff --git a/src/gradientai/types/agents/evaluation_run_list_results_response.py b/src/gradientai/types/agents/evaluation_run_list_results_response.py index f0a9882b..df830a5b 100644 --- a/src/gradientai/types/agents/evaluation_run_list_results_response.py +++ b/src/gradientai/types/agents/evaluation_run_list_results_response.py @@ -3,6 +3,8 @@ from typing import List, Optional from ..._models import BaseModel +from ..shared.api_meta import APIMeta +from ..shared.api_links import APILinks from .api_evaluation_run import APIEvaluationRun from .api_evaluation_prompt import APIEvaluationPrompt @@ -12,5 +14,11 @@ class EvaluationRunListResultsResponse(BaseModel): evaluation_run: Optional[APIEvaluationRun] = None + links: Optional[APILinks] = None + """Links to other pages""" + + meta: Optional[APIMeta] = None + """Meta information about the data set""" + prompts: Optional[List[APIEvaluationPrompt]] = None """The prompt level results.""" diff --git a/src/gradientai/types/agents/evaluation_test_case_list_response.py b/src/gradientai/types/agents/evaluation_test_case_list_response.py index ccfc263e..62b97961 100644 --- a/src/gradientai/types/agents/evaluation_test_case_list_response.py +++ b/src/gradientai/types/agents/evaluation_test_case_list_response.py @@ -10,3 +10,7 @@ class EvaluationTestCaseListResponse(BaseModel): evaluation_test_cases: Optional[List[APIEvaluationTestCase]] = None + """ + Alternative way of authentication for internal usage only - should not be + exposed to public api + """ diff --git a/src/gradientai/types/agents/evaluation_test_case_update_params.py b/src/gradientai/types/agents/evaluation_test_case_update_params.py index be70fc95..825f961b 100644 --- 
a/src/gradientai/types/agents/evaluation_test_case_update_params.py +++ b/src/gradientai/types/agents/evaluation_test_case_update_params.py @@ -26,6 +26,7 @@ class EvaluationTestCaseUpdateParams(TypedDict, total=False): star_metric: APIStarMetricParam body_test_case_uuid: Annotated[str, PropertyInfo(alias="test_case_uuid")] + """Test-case UUID to update""" class Metrics(TypedDict, total=False): diff --git a/src/gradientai/types/agents/function_create_params.py b/src/gradientai/types/agents/function_create_params.py index 938fb1d5..000de32b 100644 --- a/src/gradientai/types/agents/function_create_params.py +++ b/src/gradientai/types/agents/function_create_params.py @@ -11,15 +11,22 @@ class FunctionCreateParams(TypedDict, total=False): body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + """Agent id""" description: str + """Function description""" faas_name: str + """The name of the function in the DigitalOcean functions platform""" faas_namespace: str + """The namespace of the function in the DigitalOcean functions platform""" function_name: str + """Function name""" input_schema: object + """Describe the input schema for the function so the agent may call it""" output_schema: object + """Describe the output schema for the function so the agent handle its response""" diff --git a/src/gradientai/types/agents/function_create_response.py b/src/gradientai/types/agents/function_create_response.py index 82ab984b..65a4bb2b 100644 --- a/src/gradientai/types/agents/function_create_response.py +++ b/src/gradientai/types/agents/function_create_response.py @@ -11,6 +11,7 @@ class FunctionCreateResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/function_delete_response.py b/src/gradientai/types/agents/function_delete_response.py index 678ef62d..26ad02e6 100644 --- a/src/gradientai/types/agents/function_delete_response.py +++ 
b/src/gradientai/types/agents/function_delete_response.py @@ -11,6 +11,7 @@ class FunctionDeleteResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/function_update_params.py b/src/gradientai/types/agents/function_update_params.py index 2fa8e8f0..67c6ea9b 100644 --- a/src/gradientai/types/agents/function_update_params.py +++ b/src/gradientai/types/agents/function_update_params.py @@ -13,17 +13,25 @@ class FunctionUpdateParams(TypedDict, total=False): path_agent_uuid: Required[Annotated[str, PropertyInfo(alias="agent_uuid")]] body_agent_uuid: Annotated[str, PropertyInfo(alias="agent_uuid")] + """Agent id""" description: str + """Funciton description""" faas_name: str + """The name of the function in the DigitalOcean functions platform""" faas_namespace: str + """The namespace of the function in the DigitalOcean functions platform""" function_name: str + """Function name""" body_function_uuid: Annotated[str, PropertyInfo(alias="function_uuid")] + """Function id""" input_schema: object + """Describe the input schema for the function so the agent may call it""" output_schema: object + """Describe the output schema for the function so the agent handle its response""" diff --git a/src/gradientai/types/agents/function_update_response.py b/src/gradientai/types/agents/function_update_response.py index 82fc63be..eebde3e6 100644 --- a/src/gradientai/types/agents/function_update_response.py +++ b/src/gradientai/types/agents/function_update_response.py @@ -11,6 +11,7 @@ class FunctionUpdateResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/knowledge_base_detach_response.py b/src/gradientai/types/agents/knowledge_base_detach_response.py index 76bb4236..0dc90aaf 100644 --- a/src/gradientai/types/agents/knowledge_base_detach_response.py +++ 
b/src/gradientai/types/agents/knowledge_base_detach_response.py @@ -11,6 +11,7 @@ class KnowledgeBaseDetachResponse(BaseModel): agent: Optional["APIAgent"] = None + """An Agent""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/route_add_params.py b/src/gradientai/types/agents/route_add_params.py index b4fcb417..d8dbeff8 100644 --- a/src/gradientai/types/agents/route_add_params.py +++ b/src/gradientai/types/agents/route_add_params.py @@ -13,6 +13,7 @@ class RouteAddParams(TypedDict, total=False): path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] + """Routed agent id""" if_case: str @@ -20,3 +21,4 @@ class RouteAddParams(TypedDict, total=False): """A unique identifier for the parent agent.""" route_name: str + """Name of route""" diff --git a/src/gradientai/types/agents/route_add_response.py b/src/gradientai/types/agents/route_add_response.py index cd3bb16a..b9cc2b7d 100644 --- a/src/gradientai/types/agents/route_add_response.py +++ b/src/gradientai/types/agents/route_add_response.py @@ -9,6 +9,7 @@ class RouteAddResponse(BaseModel): child_agent_uuid: Optional[str] = None + """Routed agent id""" parent_agent_uuid: Optional[str] = None """A unique identifier for the parent agent.""" diff --git a/src/gradientai/types/agents/route_delete_response.py b/src/gradientai/types/agents/route_delete_response.py index 07105a62..b49c8b7c 100644 --- a/src/gradientai/types/agents/route_delete_response.py +++ b/src/gradientai/types/agents/route_delete_response.py @@ -9,5 +9,7 @@ class RouteDeleteResponse(BaseModel): child_agent_uuid: Optional[str] = None + """Routed agent id""" parent_agent_uuid: Optional[str] = None + """Pagent agent id""" diff --git a/src/gradientai/types/agents/route_update_params.py b/src/gradientai/types/agents/route_update_params.py index cb6d6391..453a3b93 100644 --- 
a/src/gradientai/types/agents/route_update_params.py +++ b/src/gradientai/types/agents/route_update_params.py @@ -13,12 +13,16 @@ class RouteUpdateParams(TypedDict, total=False): path_parent_agent_uuid: Required[Annotated[str, PropertyInfo(alias="parent_agent_uuid")]] body_child_agent_uuid: Annotated[str, PropertyInfo(alias="child_agent_uuid")] + """Routed agent id""" if_case: str + """Describes the case in which the child agent should be used""" body_parent_agent_uuid: Annotated[str, PropertyInfo(alias="parent_agent_uuid")] """A unique identifier for the parent agent.""" route_name: str + """Route name""" uuid: str + """Unique id of linkage""" diff --git a/src/gradientai/types/agents/route_update_response.py b/src/gradientai/types/agents/route_update_response.py index 75e1eda5..b79fc9fe 100644 --- a/src/gradientai/types/agents/route_update_response.py +++ b/src/gradientai/types/agents/route_update_response.py @@ -9,6 +9,7 @@ class RouteUpdateResponse(BaseModel): child_agent_uuid: Optional[str] = None + """Routed agent id""" parent_agent_uuid: Optional[str] = None """A unique identifier for the parent agent.""" @@ -16,3 +17,4 @@ class RouteUpdateResponse(BaseModel): rollback: Optional[bool] = None uuid: Optional[str] = None + """Unique id of linkage""" diff --git a/src/gradientai/types/agents/route_view_response.py b/src/gradientai/types/agents/route_view_response.py index dd9af70b..f0ee2d71 100644 --- a/src/gradientai/types/agents/route_view_response.py +++ b/src/gradientai/types/agents/route_view_response.py @@ -11,6 +11,7 @@ class RouteViewResponse(BaseModel): children: Optional[List["APIAgent"]] = None + """Child agents""" from ..api_agent import APIAgent diff --git a/src/gradientai/types/agents/version_list_params.py b/src/gradientai/types/agents/version_list_params.py index a71fd022..e8fa2f6d 100644 --- a/src/gradientai/types/agents/version_list_params.py +++ b/src/gradientai/types/agents/version_list_params.py @@ -9,7 +9,7 @@ class 
VersionListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/agents/version_list_response.py b/src/gradientai/types/agents/version_list_response.py index af25150e..c35a5ba4 100644 --- a/src/gradientai/types/agents/version_list_response.py +++ b/src/gradientai/types/agents/version_list_response.py @@ -22,97 +22,146 @@ class AgentVersionAttachedChildAgent(BaseModel): agent_name: Optional[str] = None + """Name of the child agent""" child_agent_uuid: Optional[str] = None + """Child agent unique identifier""" if_case: Optional[str] = None + """If case""" is_deleted: Optional[bool] = None + """Child agent is deleted""" route_name: Optional[str] = None + """Route name""" class AgentVersionAttachedFunction(BaseModel): description: Optional[str] = None + """Description of the function""" faas_name: Optional[str] = None + """FaaS name of the function""" faas_namespace: Optional[str] = None + """FaaS namespace of the function""" is_deleted: Optional[bool] = None + """Whether the function is deleted""" name: Optional[str] = None + """Name of the function""" class AgentVersionAttachedGuardrail(BaseModel): is_deleted: Optional[bool] = None + """Whether the guardrail is deleted""" name: Optional[str] = None + """Guardrail Name""" priority: Optional[int] = None + """Guardrail Priority""" uuid: Optional[str] = None + """Guardrail UUID""" class AgentVersionAttachedKnowledgebase(BaseModel): is_deleted: Optional[bool] = None + """Deletet at date / time""" name: Optional[str] = None + """Name of the knowledge base""" uuid: Optional[str] = None + """Unique id of the knowledge base""" class AgentVersion(BaseModel): id: Optional[str] = None + """Unique identifier""" agent_uuid: Optional[str] = None + """Uuid of the agent this version belongs to""" attached_child_agents: Optional[List[AgentVersionAttachedChildAgent]] = None + """List of child agent 
relationships""" attached_functions: Optional[List[AgentVersionAttachedFunction]] = None + """List of function versions""" attached_guardrails: Optional[List[AgentVersionAttachedGuardrail]] = None + """List of guardrail version""" attached_knowledgebases: Optional[List[AgentVersionAttachedKnowledgebase]] = None + """List of knowledge base agent versions""" can_rollback: Optional[bool] = None + """Whether the version is able to be rolled back to""" created_at: Optional[datetime] = None + """Creation date""" created_by_email: Optional[str] = None + """User who created this version""" currently_applied: Optional[bool] = None + """Whether this is the currently applied configuration""" description: Optional[str] = None + """Description of the agent""" instruction: Optional[str] = None + """Instruction for the agent""" k: Optional[int] = None + """K value for the agent's configuration""" max_tokens: Optional[int] = None + """Max tokens setting for the agent""" - api_model_name: Optional[str] = FieldInfo(alias="model_name", default=None) + model: Optional[str] = FieldInfo(alias="model_name", default=None) + """Name of model associated to the agent version""" name: Optional[str] = None + """Name of the agent""" provide_citations: Optional[bool] = None + """Whether the agent should provide in-response citations""" retrieval_method: Optional[APIRetrievalMethod] = None + """ + - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown + - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite + - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back + - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries + - RETRIEVAL_METHOD_NONE: The retrieval method is none + """ tags: Optional[List[str]] = None + """Tags associated with the agent""" temperature: Optional[float] = None + """Temperature setting for the agent""" top_p: Optional[float] = None + """Top_p setting for the agent""" trigger_action: Optional[str] = None + """Action triggering the 
configuration update""" version_hash: Optional[str] = None + """Version hash""" class VersionListResponse(BaseModel): agent_versions: Optional[List[AgentVersion]] = None + """Agents""" links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/gradientai/types/agents/version_update_params.py b/src/gradientai/types/agents/version_update_params.py index d7fb01cb..212eb05c 100644 --- a/src/gradientai/types/agents/version_update_params.py +++ b/src/gradientai/types/agents/version_update_params.py @@ -11,5 +11,7 @@ class VersionUpdateParams(TypedDict, total=False): body_uuid: Annotated[str, PropertyInfo(alias="uuid")] + """Agent unique identifier""" version_hash: str + """Unique identifier""" diff --git a/src/gradientai/types/agents/version_update_response.py b/src/gradientai/types/agents/version_update_response.py index 72058319..464ef12f 100644 --- a/src/gradientai/types/agents/version_update_response.py +++ b/src/gradientai/types/agents/version_update_response.py @@ -28,3 +28,4 @@ class VersionUpdateResponse(BaseModel): """An alternative way to provide auth information. 
for internal use only.""" version_hash: Optional[str] = None + """Unique identifier""" diff --git a/src/gradientai/types/api_agent.py b/src/gradientai/types/api_agent.py index 1378950a..4be22aa5 100644 --- a/src/gradientai/types/api_agent.py +++ b/src/gradientai/types/api_agent.py @@ -23,6 +23,7 @@ "Deployment", "Function", "Guardrail", + "LoggingConfig", "Template", "TemplateGuardrail", ] @@ -30,6 +31,7 @@ class APIKey(BaseModel): api_key: Optional[str] = None + """Api key""" class Chatbot(BaseModel): @@ -38,6 +40,7 @@ class Chatbot(BaseModel): logo: Optional[str] = None name: Optional[str] = None + """Name of chatbot""" primary_color: Optional[str] = None @@ -48,12 +51,15 @@ class Chatbot(BaseModel): class ChatbotIdentifier(BaseModel): agent_chatbot_identifier: Optional[str] = None + """Agent chatbot identifier""" class Deployment(BaseModel): created_at: Optional[datetime] = None + """Creation date / time""" name: Optional[str] = None + """Name""" status: Optional[ Literal[ @@ -70,22 +76,39 @@ class Deployment(BaseModel): ] = None updated_at: Optional[datetime] = None + """Last modified""" url: Optional[str] = None + """Access your deployed agent here""" uuid: Optional[str] = None + """Unique id""" visibility: Optional[APIDeploymentVisibility] = None + """ + - VISIBILITY_UNKNOWN: The status of the deployment is unknown + - VISIBILITY_DISABLED: The deployment is disabled and will no longer service + requests + - VISIBILITY_PLAYGROUND: Deprecated: No longer a valid state + - VISIBILITY_PUBLIC: The deployment is public and will service requests from the + public internet + - VISIBILITY_PRIVATE: The deployment is private and will only service requests + from other agents, or through API keys + """ class Function(BaseModel): api_key: Optional[str] = None + """Api key""" created_at: Optional[datetime] = None + """Creation date / time""" created_by: Optional[str] = None + """Created by user id from DO""" description: Optional[str] = None + """Agent description""" 
faas_name: Optional[str] = None @@ -94,14 +117,18 @@ class Function(BaseModel): input_schema: Optional[object] = None name: Optional[str] = None + """Name""" output_schema: Optional[object] = None updated_at: Optional[datetime] = None + """Last modified""" url: Optional[str] = None + """Download your agent here""" uuid: Optional[str] = None + """Unique id""" class Guardrail(BaseModel): @@ -139,72 +166,122 @@ class Guardrail(BaseModel): uuid: Optional[str] = None +class LoggingConfig(BaseModel): + galileo_project_id: Optional[str] = None + """Galileo project identifier""" + + galileo_project_name: Optional[str] = None + """Name of the Galileo project""" + + log_stream_id: Optional[str] = None + """Identifier for the log stream""" + + log_stream_name: Optional[str] = None + """Name of the log stream""" + + class TemplateGuardrail(BaseModel): priority: Optional[int] = None + """Priority of the guardrail""" uuid: Optional[str] = None + """Uuid of the guardrail""" class Template(BaseModel): created_at: Optional[datetime] = None + """The agent template's creation date""" description: Optional[str] = None + """Deprecated - Use summary instead""" guardrails: Optional[List[TemplateGuardrail]] = None + """List of guardrails associated with the agent template""" instruction: Optional[str] = None + """Instructions for the agent template""" k: Optional[int] = None + """The 'k' value for the agent template""" knowledge_bases: Optional[List[APIKnowledgeBase]] = None + """List of knowledge bases associated with the agent template""" long_description: Optional[str] = None + """The long description of the agent template""" max_tokens: Optional[int] = None + """The max_tokens setting for the agent template""" model: Optional[APIAgentModel] = None + """Description of a Model""" name: Optional[str] = None + """Name of the agent template""" short_description: Optional[str] = None + """The short description of the agent template""" summary: Optional[str] = None + """The summary of the 
agent template""" tags: Optional[List[str]] = None + """List of tags associated with the agent template""" temperature: Optional[float] = None + """The temperature setting for the agent template""" template_type: Optional[Literal["AGENT_TEMPLATE_TYPE_STANDARD", "AGENT_TEMPLATE_TYPE_ONE_CLICK"]] = None + """ + - AGENT_TEMPLATE_TYPE_STANDARD: The standard agent template + - AGENT_TEMPLATE_TYPE_ONE_CLICK: The one click agent template + """ top_p: Optional[float] = None + """The top_p setting for the agent template""" updated_at: Optional[datetime] = None + """The agent template's last updated date""" uuid: Optional[str] = None + """Unique id""" class APIAgent(BaseModel): anthropic_api_key: Optional[APIAnthropicAPIKeyInfo] = None + """Anthropic API Key Info""" api_key_infos: Optional[List[APIAgentAPIKeyInfo]] = None + """Api key infos""" api_keys: Optional[List[APIKey]] = None + """Api keys""" chatbot: Optional[Chatbot] = None + """A Chatbot""" chatbot_identifiers: Optional[List[ChatbotIdentifier]] = None + """Chatbot identifiers""" child_agents: Optional[List["APIAgent"]] = None + """Child agents""" + + conversation_logs_enabled: Optional[bool] = None + """Whether conversation logs are enabled for the agent""" created_at: Optional[datetime] = None + """Creation date / time""" deployment: Optional[Deployment] = None + """Description of deployment""" description: Optional[str] = None + """Description of agent""" functions: Optional[List[Function]] = None guardrails: Optional[List[Guardrail]] = None + """The guardrails the agent is attached to""" if_case: Optional[str] = None @@ -219,48 +296,75 @@ class APIAgent(BaseModel): k: Optional[int] = None knowledge_bases: Optional[List[APIKnowledgeBase]] = None + """Knowledge bases""" + + logging_config: Optional[LoggingConfig] = None max_tokens: Optional[int] = None model: Optional[APIAgentModel] = None + """Description of a Model""" name: Optional[str] = None + """Agent name""" openai_api_key: Optional[APIOpenAIAPIKeyInfo] = 
None + """OpenAI API Key Info""" parent_agents: Optional[List["APIAgent"]] = None + """Parent agents""" project_id: Optional[str] = None provide_citations: Optional[bool] = None + """Whether the agent should provide in-response citations""" region: Optional[str] = None + """Region code""" retrieval_method: Optional[APIRetrievalMethod] = None + """ + - RETRIEVAL_METHOD_UNKNOWN: The retrieval method is unknown + - RETRIEVAL_METHOD_REWRITE: The retrieval method is rewrite + - RETRIEVAL_METHOD_STEP_BACK: The retrieval method is step back + - RETRIEVAL_METHOD_SUB_QUERIES: The retrieval method is sub queries + - RETRIEVAL_METHOD_NONE: The retrieval method is none + """ route_created_at: Optional[datetime] = None + """Creation of route date / time""" route_created_by: Optional[str] = None route_name: Optional[str] = None + """Route name""" route_uuid: Optional[str] = None tags: Optional[List[str]] = None + """Agent tag to organize related resources""" temperature: Optional[float] = None template: Optional[Template] = None + """Represents an AgentTemplate entity""" top_p: Optional[float] = None updated_at: Optional[datetime] = None + """Last modified""" url: Optional[str] = None + """Access your agent under this url""" user_id: Optional[str] = None + """Id of user that created the agent""" uuid: Optional[str] = None + """Unique agent id""" + + version_hash: Optional[str] = None + """The latest version of the agent""" workspace: Optional["APIWorkspace"] = None diff --git a/src/gradientai/types/api_agent_api_key_info.py b/src/gradientai/types/api_agent_api_key_info.py index 8dc71564..7222153c 100644 --- a/src/gradientai/types/api_agent_api_key_info.py +++ b/src/gradientai/types/api_agent_api_key_info.py @@ -10,13 +10,18 @@ class APIAgentAPIKeyInfo(BaseModel): created_at: Optional[datetime] = None + """Creation date""" created_by: Optional[str] = None + """Created by""" deleted_at: Optional[datetime] = None + """Deleted date""" name: Optional[str] = None + """Name""" 
secret_key: Optional[str] = None uuid: Optional[str] = None + """Uuid""" diff --git a/src/gradientai/types/api_agent_model.py b/src/gradientai/types/api_agent_model.py index 1025321b..f111bfb7 100644 --- a/src/gradientai/types/api_agent_model.py +++ b/src/gradientai/types/api_agent_model.py @@ -13,30 +13,41 @@ class APIAgentModel(BaseModel): agreement: Optional[APIAgreement] = None + """Agreement Description""" created_at: Optional[datetime] = None + """Creation date / time""" inference_name: Optional[str] = None + """Internally used name""" inference_version: Optional[str] = None + """Internally used version""" is_foundational: Optional[bool] = None + """True if it is a foundational model provided by do""" metadata: Optional[object] = None + """Additional meta data""" name: Optional[str] = None + """Name of the model""" parent_uuid: Optional[str] = None + """Unique id of the model, this model is based on""" provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = ( None ) updated_at: Optional[datetime] = None + """Last modified""" upload_complete: Optional[bool] = None + """Model has been fully uploaded""" url: Optional[str] = None + """Download url""" usecases: Optional[ List[ @@ -51,7 +62,10 @@ class APIAgentModel(BaseModel): ] ] ] = None + """Usecases of the model""" uuid: Optional[str] = None + """Unique id""" version: Optional[APIModelVersion] = None + """Version Information about a Model""" diff --git a/src/gradientai/types/api_anthropic_api_key_info.py b/src/gradientai/types/api_anthropic_api_key_info.py index e2e04a8a..6440c5ef 100644 --- a/src/gradientai/types/api_anthropic_api_key_info.py +++ b/src/gradientai/types/api_anthropic_api_key_info.py @@ -10,13 +10,19 @@ class APIAnthropicAPIKeyInfo(BaseModel): created_at: Optional[datetime] = None + """Key creation date""" created_by: Optional[str] = None + """Created by user id from DO""" deleted_at: Optional[datetime] = None + """Key deleted date""" 
name: Optional[str] = None + """Name""" updated_at: Optional[datetime] = None + """Key last updated date""" uuid: Optional[str] = None + """Uuid""" diff --git a/src/gradientai/types/api_knowledge_base.py b/src/gradientai/types/api_knowledge_base.py index 2b0676f0..4e4a6567 100644 --- a/src/gradientai/types/api_knowledge_base.py +++ b/src/gradientai/types/api_knowledge_base.py @@ -11,27 +11,37 @@ class APIKnowledgeBase(BaseModel): added_to_agent_at: Optional[datetime] = None + """Time when the knowledge base was added to the agent""" created_at: Optional[datetime] = None + """Creation date / time""" database_id: Optional[str] = None embedding_model_uuid: Optional[str] = None is_public: Optional[bool] = None + """Whether the knowledge base is public or not""" last_indexing_job: Optional[APIIndexingJob] = None + """IndexingJob description""" name: Optional[str] = None + """Name of knowledge base""" project_id: Optional[str] = None region: Optional[str] = None + """Region code""" tags: Optional[List[str]] = None + """Tags to organize related resources""" updated_at: Optional[datetime] = None + """Last modified""" user_id: Optional[str] = None + """Id of user that created the knowledge base""" uuid: Optional[str] = None + """Unique id for knowledge base""" diff --git a/src/gradientai/types/api_model.py b/src/gradientai/types/api_model.py index c2bc1edd..7c530ee2 100644 --- a/src/gradientai/types/api_model.py +++ b/src/gradientai/types/api_model.py @@ -12,21 +12,31 @@ class APIModel(BaseModel): agreement: Optional[APIAgreement] = None + """Agreement Description""" created_at: Optional[datetime] = None + """Creation date / time""" is_foundational: Optional[bool] = None + """True if it is a foundational model provided by do""" name: Optional[str] = None + """Name of the model""" parent_uuid: Optional[str] = None + """Unique id of the model, this model is based on""" updated_at: Optional[datetime] = None + """Last modified""" upload_complete: Optional[bool] = None + 
"""Model has been fully uploaded""" url: Optional[str] = None + """Download url""" uuid: Optional[str] = None + """Unique id""" version: Optional[APIModelVersion] = None + """Version Information about a Model""" diff --git a/src/gradientai/types/api_model_version.py b/src/gradientai/types/api_model_version.py index 2e118632..f19a78c6 100644 --- a/src/gradientai/types/api_model_version.py +++ b/src/gradientai/types/api_model_version.py @@ -9,7 +9,10 @@ class APIModelVersion(BaseModel): major: Optional[int] = None + """Major version number""" minor: Optional[int] = None + """Minor version number""" patch: Optional[int] = None + """Patch version number""" diff --git a/src/gradientai/types/api_openai_api_key_info.py b/src/gradientai/types/api_openai_api_key_info.py index 7467cfc2..bcee992b 100644 --- a/src/gradientai/types/api_openai_api_key_info.py +++ b/src/gradientai/types/api_openai_api_key_info.py @@ -11,15 +11,22 @@ class APIOpenAIAPIKeyInfo(BaseModel): created_at: Optional[datetime] = None + """Key creation date""" created_by: Optional[str] = None + """Created by user id from DO""" deleted_at: Optional[datetime] = None + """Key deleted date""" models: Optional[List[APIAgentModel]] = None + """Models supported by the openAI api key""" name: Optional[str] = None + """Name""" updated_at: Optional[datetime] = None + """Key last updated date""" uuid: Optional[str] = None + """Uuid""" diff --git a/src/gradientai/types/api_workspace.py b/src/gradientai/types/api_workspace.py index 83e59379..564fabb6 100644 --- a/src/gradientai/types/api_workspace.py +++ b/src/gradientai/types/api_workspace.py @@ -13,24 +13,34 @@ class APIWorkspace(BaseModel): agents: Optional[List["APIAgent"]] = None + """Agents""" created_at: Optional[datetime] = None + """Creation date""" created_by: Optional[str] = None + """The id of user who created this workspace""" created_by_email: Optional[str] = None + """The email of the user who created this workspace""" deleted_at: Optional[datetime] = 
None + """Deleted date""" description: Optional[str] = None + """Description of the workspace""" evaluation_test_cases: Optional[List[APIEvaluationTestCase]] = None + """Evaluations""" name: Optional[str] = None + """Name of the workspace""" updated_at: Optional[datetime] = None + """Update date""" uuid: Optional[str] = None + """Unique id""" from .api_agent import APIAgent diff --git a/src/gradientai/types/chat/completion_create_params.py b/src/gradientai/types/chat/completion_create_params.py index ec5c6b70..aaec2ba5 100644 --- a/src/gradientai/types/chat/completion_create_params.py +++ b/src/gradientai/types/chat/completion_create_params.py @@ -12,7 +12,15 @@ "MessageChatCompletionRequestDeveloperMessage", "MessageChatCompletionRequestUserMessage", "MessageChatCompletionRequestAssistantMessage", + "MessageChatCompletionRequestAssistantMessageToolCall", + "MessageChatCompletionRequestAssistantMessageToolCallFunction", + "MessageChatCompletionRequestToolMessage", "StreamOptions", + "ToolChoice", + "ToolChoiceChatCompletionNamedToolChoice", + "ToolChoiceChatCompletionNamedToolChoiceFunction", + "Tool", + "ToolFunction", "CompletionCreateParamsNonStreaming", "CompletionCreateParamsStreaming", ] @@ -105,6 +113,25 @@ class CompletionCreateParamsBase(TypedDict, total=False): this or `top_p` but not both. """ + tool_choice: ToolChoice + """ + Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + """ + + tools: Iterable[Tool] + """A list of tools the model may call. 
+ + Currently, only functions are supported as a tool. + """ + top_logprobs: Optional[int] """ An integer between 0 and 20 specifying the number of most likely tokens to @@ -152,6 +179,30 @@ class MessageChatCompletionRequestUserMessage(TypedDict, total=False): """The role of the messages author, in this case `user`.""" +class MessageChatCompletionRequestAssistantMessageToolCallFunction(TypedDict, total=False): + arguments: Required[str] + """ + The arguments to call the function with, as generated by the model in JSON + format. Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. Validate the + arguments in your code before calling your function. + """ + + name: Required[str] + """The name of the function to call.""" + + +class MessageChatCompletionRequestAssistantMessageToolCall(TypedDict, total=False): + id: Required[str] + """The ID of the tool call.""" + + function: Required[MessageChatCompletionRequestAssistantMessageToolCallFunction] + """The function that the model called.""" + + type: Required[Literal["function"]] + """The type of the tool. 
Currently, only `function` is supported.""" + + class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" @@ -159,12 +210,27 @@ class MessageChatCompletionRequestAssistantMessage(TypedDict, total=False): content: Union[str, List[str], None] """The contents of the assistant message.""" + tool_calls: Iterable[MessageChatCompletionRequestAssistantMessageToolCall] + """The tool calls generated by the model, such as function calls.""" + + +class MessageChatCompletionRequestToolMessage(TypedDict, total=False): + content: Required[str] + """The contents of the tool message.""" + + role: Required[Literal["tool"]] + """The role of the messages author, in this case `tool`.""" + + tool_call_id: Required[str] + """Tool call that this message is responding to.""" + Message: TypeAlias = Union[ MessageChatCompletionRequestSystemMessage, MessageChatCompletionRequestDeveloperMessage, MessageChatCompletionRequestUserMessage, MessageChatCompletionRequestAssistantMessage, + MessageChatCompletionRequestToolMessage, ] @@ -181,6 +247,53 @@ class StreamOptions(TypedDict, total=False): """ +class ToolChoiceChatCompletionNamedToolChoiceFunction(TypedDict, total=False): + name: Required[str] + """The name of the function to call.""" + + +class ToolChoiceChatCompletionNamedToolChoice(TypedDict, total=False): + function: Required[ToolChoiceChatCompletionNamedToolChoiceFunction] + + type: Required[Literal["function"]] + """The type of the tool. Currently, only `function` is supported.""" + + +ToolChoice: TypeAlias = Union[Literal["none", "auto", "required"], ToolChoiceChatCompletionNamedToolChoice] + + +class ToolFunction(TypedDict, total=False): + name: Required[str] + """The name of the function to be called. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. 
+ """ + + description: str + """ + A description of what the function does, used by the model to choose when and + how to call the function. + """ + + parameters: Dict[str, object] + """The parameters the functions accepts, described as a JSON Schema object. + + See the [guide](/docs/guides/function-calling) for examples, and the + [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + documentation about the format. + + Omitting `parameters` defines a function with an empty parameter list. + """ + + +class Tool(TypedDict, total=False): + function: Required[ToolFunction] + + type: Required[Literal["function"]] + """The type of the tool. Currently, only `function` is supported.""" + + class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False): stream: Optional[Literal[False]] """ diff --git a/src/gradientai/types/chat/completion_create_response.py b/src/gradientai/types/chat/completion_create_response.py index 1791373b..73a09cf5 100644 --- a/src/gradientai/types/chat/completion_create_response.py +++ b/src/gradientai/types/chat/completion_create_response.py @@ -4,9 +4,17 @@ from typing_extensions import Literal from ..._models import BaseModel +from ..shared.completion_usage import CompletionUsage from ..shared.chat_completion_token_logprob import ChatCompletionTokenLogprob -__all__ = ["CompletionCreateResponse", "Choice", "ChoiceLogprobs", "ChoiceMessage", "Usage"] +__all__ = [ + "CompletionCreateResponse", + "Choice", + "ChoiceLogprobs", + "ChoiceMessage", + "ChoiceMessageToolCall", + "ChoiceMessageToolCallFunction", +] class ChoiceLogprobs(BaseModel): @@ -17,6 +25,30 @@ class ChoiceLogprobs(BaseModel): """A list of message refusal tokens with log probability information.""" +class ChoiceMessageToolCallFunction(BaseModel): + arguments: str + """ + The arguments to call the function with, as generated by the model in JSON + format. 
Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. Validate the + arguments in your code before calling your function. + """ + + name: str + """The name of the function to call.""" + + +class ChoiceMessageToolCall(BaseModel): + id: str + """The ID of the tool call.""" + + function: ChoiceMessageToolCallFunction + """The function that the model called.""" + + type: Literal["function"] + """The type of the tool. Currently, only `function` is supported.""" + + class ChoiceMessage(BaseModel): content: Optional[str] = None """The contents of the message.""" @@ -27,14 +59,17 @@ class ChoiceMessage(BaseModel): role: Literal["assistant"] """The role of the author of this message.""" + tool_calls: Optional[List[ChoiceMessageToolCall]] = None + """The tool calls generated by the model, such as function calls.""" + class Choice(BaseModel): - finish_reason: Literal["stop", "length"] + finish_reason: Literal["stop", "length", "tool_calls"] """The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, or `length` if the maximum number of tokens specified in the request - was reached. + was reached, `tool_calls` if the model called a tool. 
""" index: int @@ -47,17 +82,6 @@ class Choice(BaseModel): """A chat completion message generated by the model.""" -class Usage(BaseModel): - completion_tokens: int - """Number of tokens in the generated completion.""" - - prompt_tokens: int - """Number of tokens in the prompt.""" - - total_tokens: int - """Total number of tokens used in the request (prompt + completion).""" - - class CompletionCreateResponse(BaseModel): id: str """A unique identifier for the chat completion.""" @@ -77,5 +101,5 @@ class CompletionCreateResponse(BaseModel): object: Literal["chat.completion"] """The object type, which is always `chat.completion`.""" - usage: Optional[Usage] = None + usage: Optional[CompletionUsage] = None """Usage statistics for the completion request.""" diff --git a/src/gradientai/types/droplet_backup_policy.py b/src/gradientai/types/droplet_backup_policy.py new file mode 100644 index 00000000..63112e8f --- /dev/null +++ b/src/gradientai/types/droplet_backup_policy.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["DropletBackupPolicy"] + + +class DropletBackupPolicy(BaseModel): + hour: Optional[Literal[0, 4, 8, 12, 16, 20]] = None + """The hour of the day that the backup window will start.""" + + plan: Optional[Literal["daily", "weekly"]] = None + """The backup plan used for the Droplet. + + The plan can be either `daily` or `weekly`. 
+ """ + + retention_period_days: Optional[int] = None + """The number of days the backup will be retained.""" + + weekday: Optional[Literal["SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"]] = None + """The day of the week on which the backup will occur.""" + + window_length_hours: Optional[int] = None + """The length of the backup window starting from `hour`.""" diff --git a/src/gradientai/types/droplet_backup_policy_param.py b/src/gradientai/types/droplet_backup_policy_param.py new file mode 100644 index 00000000..802f057f --- /dev/null +++ b/src/gradientai/types/droplet_backup_policy_param.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["DropletBackupPolicyParam"] + + +class DropletBackupPolicyParam(TypedDict, total=False): + hour: Literal[0, 4, 8, 12, 16, 20] + """The hour of the day that the backup window will start.""" + + plan: Literal["daily", "weekly"] + """The backup plan used for the Droplet. + + The plan can be either `daily` or `weekly`. + """ + + weekday: Literal["SUN", "MON", "TUE", "WED", "THU", "FRI", "SAT"] + """The day of the week on which the backup will occur.""" diff --git a/src/gradientai/types/gpu_droplet_create_params.py b/src/gradientai/types/gpu_droplet_create_params.py new file mode 100644 index 00000000..f38661fb --- /dev/null +++ b/src/gradientai/types/gpu_droplet_create_params.py @@ -0,0 +1,213 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Union, Optional +from typing_extensions import Required, TypeAlias, TypedDict + +from .droplet_backup_policy_param import DropletBackupPolicyParam + +__all__ = ["GPUDropletCreateParams", "DropletSingleCreate", "DropletMultiCreate"] + + +class DropletSingleCreate(TypedDict, total=False): + image: Required[Union[str, int]] + """ + The image ID of a public or private image or the slug identifier for a public + image. This image will be the base image for your Droplet. Requires `image:read` + scope. + """ + + name: Required[str] + """The human-readable string you wish to use when displaying the Droplet name. + + The name, if set to a domain name managed in the DigitalOcean DNS management + system, will configure a PTR record for the Droplet. The name set during + creation will also determine the hostname for the Droplet in its internal + configuration. + """ + + size: Required[str] + """The slug identifier for the size that you wish to select for this Droplet.""" + + backup_policy: DropletBackupPolicyParam + """An object specifying the backup policy for the Droplet. + + If omitted and `backups` is `true`, the backup plan will default to daily. + """ + + backups: bool + """ + A boolean indicating whether automated backups should be enabled for the + Droplet. + """ + + ipv6: bool + """A boolean indicating whether to enable IPv6 on the Droplet.""" + + monitoring: bool + """A boolean indicating whether to install the DigitalOcean agent for monitoring.""" + + private_networking: bool + """This parameter has been deprecated. + + Use `vpc_uuid` instead to specify a VPC network for the Droplet. If no + `vpc_uuid` is provided, the Droplet will be placed in your account's default VPC + for the region. + """ + + region: str + """The slug identifier for the region that you wish to deploy the Droplet in. + + If the specific datacenter is not important, a slug prefix (e.g. 
`nyc`) can + be used to deploy the Droplet in any of that region's locations (`nyc1`, + `nyc2`, or `nyc3`). If the region is omitted from the create request completely, + the Droplet may deploy in any region. + """ + + ssh_keys: List[Union[str, int]] + """ + An array containing the IDs or fingerprints of the SSH keys that you wish to + embed in the Droplet's root account upon creation. You must add the keys to your + team before they can be embedded on a Droplet. Requires `ssh_key:read` scope. + """ + + tags: Optional[List[str]] + """A flat array of tag names as strings to apply to the Droplet after it is + created. + + Tag names can either be existing or new tags. Requires `tag:create` scope. + """ + + user_data: str + """ + A string containing 'user data' which may be used to configure the Droplet on + first boot, often a 'cloud-config' file or Bash script. It must be plain text + and may not exceed 64 KiB in size. + """ + + volumes: List[str] + """ + An array of IDs for block storage volumes that will be attached to the Droplet + once created. The volumes must not already be attached to an existing Droplet. + Requires `block_storage:read` scope. + """ + + vpc_uuid: str + """A string specifying the UUID of the VPC to which the Droplet will be assigned. + + If excluded, the Droplet will be assigned to your account's default VPC for the + region. Requires `vpc:read` scope. + """ + + with_droplet_agent: bool + """ + A boolean indicating whether to install the DigitalOcean agent used for + providing access to the Droplet web console in the control panel. By default, + the agent is installed on new Droplets but installation errors (i.e. OS not + supported) are ignored. To prevent it from being installed, set to `false`. To + make installation errors fatal, explicitly set it to `true`. 
+ """ + + +class DropletMultiCreate(TypedDict, total=False): + image: Required[Union[str, int]] + """ + The image ID of a public or private image or the slug identifier for a public + image. This image will be the base image for your Droplet. Requires `image:read` + scope. + """ + + names: Required[List[str]] + """ + An array of human human-readable strings you wish to use when displaying the + Droplet name. Each name, if set to a domain name managed in the DigitalOcean DNS + management system, will configure a PTR record for the Droplet. Each name set + during creation will also determine the hostname for the Droplet in its internal + configuration. + """ + + size: Required[str] + """The slug identifier for the size that you wish to select for this Droplet.""" + + backup_policy: DropletBackupPolicyParam + """An object specifying the backup policy for the Droplet. + + If omitted and `backups` is `true`, the backup plan will default to daily. + """ + + backups: bool + """ + A boolean indicating whether automated backups should be enabled for the + Droplet. + """ + + ipv6: bool + """A boolean indicating whether to enable IPv6 on the Droplet.""" + + monitoring: bool + """A boolean indicating whether to install the DigitalOcean agent for monitoring.""" + + private_networking: bool + """This parameter has been deprecated. + + Use `vpc_uuid` instead to specify a VPC network for the Droplet. If no + `vpc_uuid` is provided, the Droplet will be placed in your account's default VPC + for the region. + """ + + region: str + """The slug identifier for the region that you wish to deploy the Droplet in. + + If the specific datacenter is not not important, a slug prefix (e.g. `nyc`) can + be used to deploy the Droplet in any of the that region's locations (`nyc1`, + `nyc2`, or `nyc3`). If the region is omitted from the create request completely, + the Droplet may deploy in any region. 
+ """ + + ssh_keys: List[Union[str, int]] + """ + An array containing the IDs or fingerprints of the SSH keys that you wish to + embed in the Droplet's root account upon creation. You must add the keys to your + team before they can be embedded on a Droplet. Requires `ssh_key:read` scope. + """ + + tags: Optional[List[str]] + """A flat array of tag names as strings to apply to the Droplet after it is + created. + + Tag names can either be existing or new tags. Requires `tag:create` scope. + """ + + user_data: str + """ + A string containing 'user data' which may be used to configure the Droplet on + first boot, often a 'cloud-config' file or Bash script. It must be plain text + and may not exceed 64 KiB in size. + """ + + volumes: List[str] + """ + An array of IDs for block storage volumes that will be attached to the Droplet + once created. The volumes must not already be attached to an existing Droplet. + Requires `block_storage:read` scpoe. + """ + + vpc_uuid: str + """A string specifying the UUID of the VPC to which the Droplet will be assigned. + + If excluded, the Droplet will be assigned to your account's default VPC for the + region. Requires `vpc:read` scope. + """ + + with_droplet_agent: bool + """ + A boolean indicating whether to install the DigitalOcean agent used for + providing access to the Droplet web console in the control panel. By default, + the agent is installed on new Droplets but installation errors (i.e. OS not + supported) are ignored. To prevent it from being installed, set to `false`. To + make installation errors fatal, explicitly set it to `true`. 
+ """ + + +GPUDropletCreateParams: TypeAlias = Union[DropletSingleCreate, DropletMultiCreate] diff --git a/src/gradientai/types/gpu_droplet_create_response.py b/src/gradientai/types/gpu_droplet_create_response.py new file mode 100644 index 00000000..72fafb96 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_create_response.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import TypeAlias + +from .._models import BaseModel +from .shared.droplet import Droplet +from .shared.action_link import ActionLink + +__all__ = [ + "GPUDropletCreateResponse", + "SingleDropletResponse", + "SingleDropletResponseLinks", + "MultipleDropletResponse", + "MultipleDropletResponseLinks", +] + + +class SingleDropletResponseLinks(BaseModel): + actions: Optional[List[ActionLink]] = None + + +class SingleDropletResponse(BaseModel): + droplet: Droplet + + links: SingleDropletResponseLinks + + +class MultipleDropletResponseLinks(BaseModel): + actions: Optional[List[ActionLink]] = None + + +class MultipleDropletResponse(BaseModel): + droplets: List[Droplet] + + links: MultipleDropletResponseLinks + + +GPUDropletCreateResponse: TypeAlias = Union[SingleDropletResponse, MultipleDropletResponse] diff --git a/src/gradientai/types/gpu_droplet_delete_by_tag_params.py b/src/gradientai/types/gpu_droplet_delete_by_tag_params.py new file mode 100644 index 00000000..bc303125 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_delete_by_tag_params.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["GPUDropletDeleteByTagParams"] + + +class GPUDropletDeleteByTagParams(TypedDict, total=False): + tag_name: Required[str] + """Specifies Droplets to be deleted by tag.""" diff --git a/src/gradientai/types/gpu_droplet_list_firewalls_params.py b/src/gradientai/types/gpu_droplet_list_firewalls_params.py new file mode 100644 index 00000000..1f0111d8 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_firewalls_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["GPUDropletListFirewallsParams"] + + +class GPUDropletListFirewallsParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplet_list_firewalls_response.py b/src/gradientai/types/gpu_droplet_list_firewalls_response.py new file mode 100644 index 00000000..617cdf98 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_firewalls_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from .._models import BaseModel +from .shared.page_links import PageLinks +from .gpu_droplets.firewall import Firewall +from .shared.meta_properties import MetaProperties + +__all__ = ["GPUDropletListFirewallsResponse"] + + +class GPUDropletListFirewallsResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + firewalls: Optional[List[Firewall]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplet_list_kernels_params.py b/src/gradientai/types/gpu_droplet_list_kernels_params.py new file mode 100644 index 00000000..7aa73225 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_kernels_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["GPUDropletListKernelsParams"] + + +class GPUDropletListKernelsParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplet_list_kernels_response.py b/src/gradientai/types/gpu_droplet_list_kernels_response.py new file mode 100644 index 00000000..5fa9a355 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_kernels_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from .._models import BaseModel +from .shared.kernel import Kernel +from .shared.page_links import PageLinks +from .shared.meta_properties import MetaProperties + +__all__ = ["GPUDropletListKernelsResponse"] + + +class GPUDropletListKernelsResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + kernels: Optional[List[Optional[Kernel]]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplet_list_neighbors_response.py b/src/gradientai/types/gpu_droplet_list_neighbors_response.py new file mode 100644 index 00000000..cdfce3e0 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_neighbors_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from .._models import BaseModel +from .shared.droplet import Droplet + +__all__ = ["GPUDropletListNeighborsResponse"] + + +class GPUDropletListNeighborsResponse(BaseModel): + droplets: Optional[List[Droplet]] = None diff --git a/src/gradientai/types/gpu_droplet_list_params.py b/src/gradientai/types/gpu_droplet_list_params.py new file mode 100644 index 00000000..bf6eb793 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_params.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["GPUDropletListParams"] + + +class GPUDropletListParams(TypedDict, total=False): + name: str + """Used to filter list response by Droplet name returning only exact matches. + + It is case-insensitive and can not be combined with `tag_name`. + """ + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + tag_name: str + """Used to filter Droplets by a specific tag. 
+ + Can not be combined with `name` or `type`. Requires `tag:read` scope. + """ + + type: Literal["droplets", "gpus"] + """When `type` is set to `gpus`, only GPU Droplets will be returned. + + By default, only non-GPU Droplets are returned. Can not be combined with + `tag_name`. + """ diff --git a/src/gradientai/types/gpu_droplet_list_response.py b/src/gradientai/types/gpu_droplet_list_response.py new file mode 100644 index 00000000..73e1e503 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from .._models import BaseModel +from .shared.droplet import Droplet +from .shared.page_links import PageLinks +from .shared.meta_properties import MetaProperties + +__all__ = ["GPUDropletListResponse"] + + +class GPUDropletListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + droplets: Optional[List[Droplet]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplet_list_snapshots_params.py b/src/gradientai/types/gpu_droplet_list_snapshots_params.py new file mode 100644 index 00000000..66e65a36 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_snapshots_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["GPUDropletListSnapshotsParams"] + + +class GPUDropletListSnapshotsParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplet_list_snapshots_response.py b/src/gradientai/types/gpu_droplet_list_snapshots_response.py new file mode 100644 index 00000000..4b34d670 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_list_snapshots_response.py @@ -0,0 +1,53 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .._models import BaseModel +from .shared.page_links import PageLinks +from .shared.meta_properties import MetaProperties + +__all__ = ["GPUDropletListSnapshotsResponse", "Snapshot"] + + +class Snapshot(BaseModel): + id: int + """The unique identifier for the snapshot or backup.""" + + created_at: datetime + """ + A time value given in ISO8601 combined date and time format that represents when + the snapshot was created. + """ + + min_disk_size: int + """The minimum size in GB required for a volume or Droplet to use this snapshot.""" + + name: str + """A human-readable name for the snapshot.""" + + regions: List[str] + """An array of the regions that the snapshot is available in. + + The regions are represented by their identifying slug values. + """ + + size_gigabytes: float + """The billable size of the snapshot in gigabytes.""" + + type: Literal["snapshot", "backup"] + """Describes the kind of image. + + It may be one of `snapshot` or `backup`. This specifies whether an image is a + user-generated Droplet snapshot or automatically created Droplet backup. 
+ """ + + +class GPUDropletListSnapshotsResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + links: Optional[PageLinks] = None + + snapshots: Optional[List[Snapshot]] = None diff --git a/src/gradientai/types/gpu_droplet_retrieve_response.py b/src/gradientai/types/gpu_droplet_retrieve_response.py new file mode 100644 index 00000000..d8cc0f20 --- /dev/null +++ b/src/gradientai/types/gpu_droplet_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from .._models import BaseModel +from .shared.droplet import Droplet + +__all__ = ["GPUDropletRetrieveResponse"] + + +class GPUDropletRetrieveResponse(BaseModel): + droplet: Optional[Droplet] = None diff --git a/src/gradientai/types/gpu_droplets/__init__.py b/src/gradientai/types/gpu_droplets/__init__.py new file mode 100644 index 00000000..c2f1835f --- /dev/null +++ b/src/gradientai/types/gpu_droplets/__init__.py @@ -0,0 +1,104 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .domains import Domains as Domains +from .firewall import Firewall as Firewall +from .floating_ip import FloatingIP as FloatingIP +from .lb_firewall import LbFirewall as LbFirewall +from .glb_settings import GlbSettings as GlbSettings +from .health_check import HealthCheck as HealthCheck +from .domains_param import DomainsParam as DomainsParam +from .load_balancer import LoadBalancer as LoadBalancer +from .autoscale_pool import AutoscalePool as AutoscalePool +from .firewall_param import FirewallParam as FirewallParam +from .forwarding_rule import ForwardingRule as ForwardingRule +from .sticky_sessions import StickySessions as StickySessions +from .size_list_params import SizeListParams as SizeListParams +from .image_list_params import ImageListParams as ImageListParams +from .lb_firewall_param import LbFirewallParam as LbFirewallParam +from .action_list_params import ActionListParams as ActionListParams +from .backup_list_params import BackupListParams as BackupListParams +from .glb_settings_param import GlbSettingsParam as GlbSettingsParam +from .health_check_param import HealthCheckParam as HealthCheckParam +from .size_list_response import SizeListResponse as SizeListResponse +from .volume_list_params import VolumeListParams as VolumeListParams +from .associated_resource import AssociatedResource as AssociatedResource +from .current_utilization import CurrentUtilization as CurrentUtilization +from .image_create_params import ImageCreateParams as ImageCreateParams +from .image_list_response import ImageListResponse as ImageListResponse +from .image_update_params import ImageUpdateParams as ImageUpdateParams +from .action_list_response import ActionListResponse as ActionListResponse +from .backup_list_response import BackupListResponse as BackupListResponse +from .firewall_list_params import FirewallListParams as FirewallListParams +from .snapshot_list_params import SnapshotListParams as SnapshotListParams +from 
.volume_create_params import VolumeCreateParams as VolumeCreateParams +from .volume_list_response import VolumeListResponse as VolumeListResponse +from .autoscale_list_params import AutoscaleListParams as AutoscaleListParams +from .forwarding_rule_param import ForwardingRuleParam as ForwardingRuleParam +from .image_create_response import ImageCreateResponse as ImageCreateResponse +from .image_update_response import ImageUpdateResponse as ImageUpdateResponse +from .sticky_sessions_param import StickySessionsParam as StickySessionsParam +from .action_initiate_params import ActionInitiateParams as ActionInitiateParams +from .firewall_create_params import FirewallCreateParams as FirewallCreateParams +from .firewall_list_response import FirewallListResponse as FirewallListResponse +from .firewall_update_params import FirewallUpdateParams as FirewallUpdateParams +from .snapshot_list_response import SnapshotListResponse as SnapshotListResponse +from .volume_create_response import VolumeCreateResponse as VolumeCreateResponse +from .autoscale_create_params import AutoscaleCreateParams as AutoscaleCreateParams +from .autoscale_list_response import AutoscaleListResponse as AutoscaleListResponse +from .autoscale_update_params import AutoscaleUpdateParams as AutoscaleUpdateParams +from .floating_ip_list_params import FloatingIPListParams as FloatingIPListParams +from .image_retrieve_response import ImageRetrieveResponse as ImageRetrieveResponse +from .action_initiate_response import ActionInitiateResponse as ActionInitiateResponse +from .action_retrieve_response import ActionRetrieveResponse as ActionRetrieveResponse +from .firewall_create_response import FirewallCreateResponse as FirewallCreateResponse +from .firewall_update_response import FirewallUpdateResponse as FirewallUpdateResponse +from .volume_retrieve_response import VolumeRetrieveResponse as VolumeRetrieveResponse +from .autoscale_create_response import AutoscaleCreateResponse as AutoscaleCreateResponse +from 
.autoscale_update_response import AutoscaleUpdateResponse as AutoscaleUpdateResponse +from .floating_ip_create_params import FloatingIPCreateParams as FloatingIPCreateParams +from .floating_ip_list_response import FloatingIPListResponse as FloatingIPListResponse +from .load_balancer_list_params import LoadBalancerListParams as LoadBalancerListParams +from .firewall_retrieve_response import FirewallRetrieveResponse as FirewallRetrieveResponse +from .snapshot_retrieve_response import SnapshotRetrieveResponse as SnapshotRetrieveResponse +from .action_bulk_initiate_params import ActionBulkInitiateParams as ActionBulkInitiateParams +from .autoscale_retrieve_response import AutoscaleRetrieveResponse as AutoscaleRetrieveResponse +from .backup_list_policies_params import BackupListPoliciesParams as BackupListPoliciesParams +from .floating_ip_create_response import FloatingIPCreateResponse as FloatingIPCreateResponse +from .load_balancer_create_params import LoadBalancerCreateParams as LoadBalancerCreateParams +from .load_balancer_list_response import LoadBalancerListResponse as LoadBalancerListResponse +from .load_balancer_update_params import LoadBalancerUpdateParams as LoadBalancerUpdateParams +from .autoscale_pool_static_config import AutoscalePoolStaticConfig as AutoscalePoolStaticConfig +from .volume_delete_by_name_params import VolumeDeleteByNameParams as VolumeDeleteByNameParams +from .action_bulk_initiate_response import ActionBulkInitiateResponse as ActionBulkInitiateResponse +from .autoscale_list_history_params import AutoscaleListHistoryParams as AutoscaleListHistoryParams +from .autoscale_list_members_params import AutoscaleListMembersParams as AutoscaleListMembersParams +from .autoscale_pool_dynamic_config import AutoscalePoolDynamicConfig as AutoscalePoolDynamicConfig +from .backup_list_policies_response import BackupListPoliciesResponse as BackupListPoliciesResponse +from .destroyed_associated_resource import DestroyedAssociatedResource as 
DestroyedAssociatedResource +from .floating_ip_retrieve_response import FloatingIPRetrieveResponse as FloatingIPRetrieveResponse +from .load_balancer_create_response import LoadBalancerCreateResponse as LoadBalancerCreateResponse +from .load_balancer_update_response import LoadBalancerUpdateResponse as LoadBalancerUpdateResponse +from .autoscale_list_history_response import AutoscaleListHistoryResponse as AutoscaleListHistoryResponse +from .autoscale_list_members_response import AutoscaleListMembersResponse as AutoscaleListMembersResponse +from .autoscale_pool_droplet_template import AutoscalePoolDropletTemplate as AutoscalePoolDropletTemplate +from .backup_retrieve_policy_response import BackupRetrievePolicyResponse as BackupRetrievePolicyResponse +from .load_balancer_retrieve_response import LoadBalancerRetrieveResponse as LoadBalancerRetrieveResponse +from .autoscale_pool_static_config_param import AutoscalePoolStaticConfigParam as AutoscalePoolStaticConfigParam +from .autoscale_pool_dynamic_config_param import AutoscalePoolDynamicConfigParam as AutoscalePoolDynamicConfigParam +from .autoscale_pool_droplet_template_param import ( + AutoscalePoolDropletTemplateParam as AutoscalePoolDropletTemplateParam, +) +from .backup_list_supported_policies_response import ( + BackupListSupportedPoliciesResponse as BackupListSupportedPoliciesResponse, +) +from .destroy_with_associated_resource_list_response import ( + DestroyWithAssociatedResourceListResponse as DestroyWithAssociatedResourceListResponse, +) +from .destroy_with_associated_resource_check_status_response import ( + DestroyWithAssociatedResourceCheckStatusResponse as DestroyWithAssociatedResourceCheckStatusResponse, +) +from .destroy_with_associated_resource_delete_selective_params import ( + DestroyWithAssociatedResourceDeleteSelectiveParams as DestroyWithAssociatedResourceDeleteSelectiveParams, +) diff --git a/src/gradientai/types/gpu_droplets/account/__init__.py 
b/src/gradientai/types/gpu_droplets/account/__init__.py new file mode 100644 index 00000000..4cd64974 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/account/__init__.py @@ -0,0 +1,11 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .key_list_params import KeyListParams as KeyListParams +from .key_create_params import KeyCreateParams as KeyCreateParams +from .key_list_response import KeyListResponse as KeyListResponse +from .key_update_params import KeyUpdateParams as KeyUpdateParams +from .key_create_response import KeyCreateResponse as KeyCreateResponse +from .key_update_response import KeyUpdateResponse as KeyUpdateResponse +from .key_retrieve_response import KeyRetrieveResponse as KeyRetrieveResponse diff --git a/src/gradientai/types/gpu_droplets/account/key_create_params.py b/src/gradientai/types/gpu_droplets/account/key_create_params.py new file mode 100644 index 00000000..4e7c1cef --- /dev/null +++ b/src/gradientai/types/gpu_droplets/account/key_create_params.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["KeyCreateParams"] + + +class KeyCreateParams(TypedDict, total=False): + name: Required[str] + """ + A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + """ + + public_key: Required[str] + """The entire public key string that was uploaded. + + Embedded into the root user's `authorized_keys` file if you include this key + during Droplet creation. 
+ """ diff --git a/src/gradientai/types/gpu_droplets/account/key_create_response.py b/src/gradientai/types/gpu_droplets/account/key_create_response.py new file mode 100644 index 00000000..9fe566ed --- /dev/null +++ b/src/gradientai/types/gpu_droplets/account/key_create_response.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["KeyCreateResponse", "SSHKey"] + + +class SSHKey(BaseModel): + name: str + """ + A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + """ + + public_key: str + """The entire public key string that was uploaded. + + Embedded into the root user's `authorized_keys` file if you include this key + during Droplet creation. + """ + + id: Optional[int] = None + """A unique identification number for this key. + + Can be used to embed a specific SSH key into a Droplet. + """ + + fingerprint: Optional[str] = None + """ + A unique identifier that differentiates this key from other keys using a format + that SSH recognizes. The fingerprint is created when the key is added to your + account. + """ + + +class KeyCreateResponse(BaseModel): + ssh_key: Optional[SSHKey] = None diff --git a/src/gradientai/types/gpu_droplets/account/key_list_params.py b/src/gradientai/types/gpu_droplets/account/key_list_params.py new file mode 100644 index 00000000..44a455f3 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/account/key_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyListParams"] + + +class KeyListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/account/key_list_response.py b/src/gradientai/types/gpu_droplets/account/key_list_response.py new file mode 100644 index 00000000..be4c721c --- /dev/null +++ b/src/gradientai/types/gpu_droplets/account/key_list_response.py @@ -0,0 +1,46 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ...._models import BaseModel +from ...shared.page_links import PageLinks +from ...shared.meta_properties import MetaProperties + +__all__ = ["KeyListResponse", "SSHKey"] + + +class SSHKey(BaseModel): + name: str + """ + A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + """ + + public_key: str + """The entire public key string that was uploaded. + + Embedded into the root user's `authorized_keys` file if you include this key + during Droplet creation. + """ + + id: Optional[int] = None + """A unique identification number for this key. + + Can be used to embed a specific SSH key into a Droplet. + """ + + fingerprint: Optional[str] = None + """ + A unique identifier that differentiates this key from other keys using a format + that SSH recognizes. The fingerprint is created when the key is added to your + account. 
+ """ + + +class KeyListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + links: Optional[PageLinks] = None + + ssh_keys: Optional[List[SSHKey]] = None diff --git a/src/gradientai/types/gpu_droplets/account/key_retrieve_response.py b/src/gradientai/types/gpu_droplets/account/key_retrieve_response.py new file mode 100644 index 00000000..7cd3215e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/account/key_retrieve_response.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["KeyRetrieveResponse", "SSHKey"] + + +class SSHKey(BaseModel): + name: str + """ + A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + """ + + public_key: str + """The entire public key string that was uploaded. + + Embedded into the root user's `authorized_keys` file if you include this key + during Droplet creation. + """ + + id: Optional[int] = None + """A unique identification number for this key. + + Can be used to embed a specific SSH key into a Droplet. + """ + + fingerprint: Optional[str] = None + """ + A unique identifier that differentiates this key from other keys using a format + that SSH recognizes. The fingerprint is created when the key is added to your + account. + """ + + +class KeyRetrieveResponse(BaseModel): + ssh_key: Optional[SSHKey] = None diff --git a/src/gradientai/types/gpu_droplets/account/key_update_params.py b/src/gradientai/types/gpu_droplets/account/key_update_params.py new file mode 100644 index 00000000..e73d8b7b --- /dev/null +++ b/src/gradientai/types/gpu_droplets/account/key_update_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["KeyUpdateParams"] + + +class KeyUpdateParams(TypedDict, total=False): + name: str + """ + A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + """ diff --git a/src/gradientai/types/gpu_droplets/account/key_update_response.py b/src/gradientai/types/gpu_droplets/account/key_update_response.py new file mode 100644 index 00000000..2821e44a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/account/key_update_response.py @@ -0,0 +1,39 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel + +__all__ = ["KeyUpdateResponse", "SSHKey"] + + +class SSHKey(BaseModel): + name: str + """ + A human-readable display name for this key, used to easily identify the SSH keys + when they are displayed. + """ + + public_key: str + """The entire public key string that was uploaded. + + Embedded into the root user's `authorized_keys` file if you include this key + during Droplet creation. + """ + + id: Optional[int] = None + """A unique identification number for this key. + + Can be used to embed a specific SSH key into a Droplet. + """ + + fingerprint: Optional[str] = None + """ + A unique identifier that differentiates this key from other keys using a format + that SSH recognizes. The fingerprint is created when the key is added to your + account. + """ + + +class KeyUpdateResponse(BaseModel): + ssh_key: Optional[SSHKey] = None diff --git a/src/gradientai/types/gpu_droplets/action_bulk_initiate_params.py b/src/gradientai/types/gpu_droplets/action_bulk_initiate_params.py new file mode 100644 index 00000000..a6402096 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/action_bulk_initiate_params.py @@ -0,0 +1,72 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["ActionBulkInitiateParams", "DropletAction", "DropletActionSnapshot"] + + +class DropletAction(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + tag_name: str + """Used to filter Droplets by a specific tag. + + Can not be combined with `name` or `type`. Requires `tag:read` scope. + """ + + +class DropletActionSnapshot(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + tag_name: str + """Used to filter Droplets by a specific tag. + + Can not be combined with `name` or `type`. Requires `tag:read` scope. + """ + + name: str + """The name to give the new snapshot of the Droplet.""" + + +ActionBulkInitiateParams: TypeAlias = Union[DropletAction, DropletActionSnapshot] diff --git a/src/gradientai/types/gpu_droplets/action_bulk_initiate_response.py b/src/gradientai/types/gpu_droplets/action_bulk_initiate_response.py new file mode 100644 index 00000000..905860d7 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/action_bulk_initiate_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from ..._models import BaseModel +from ..shared.action import Action + +__all__ = ["ActionBulkInitiateResponse"] + + +class ActionBulkInitiateResponse(BaseModel): + actions: Optional[List[Action]] = None diff --git a/src/gradientai/types/gpu_droplets/action_initiate_params.py b/src/gradientai/types/gpu_droplets/action_initiate_params.py new file mode 100644 index 00000000..f0ef6b1e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/action_initiate_params.py @@ -0,0 +1,278 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from ..droplet_backup_policy_param import DropletBackupPolicyParam + +__all__ = [ + "ActionInitiateParams", + "DropletAction", + "DropletActionEnableBackups", + "DropletActionChangeBackupPolicy", + "DropletActionRestore", + "DropletActionResize", + "DropletActionRebuild", + "DropletActionRename", + "DropletActionChangeKernel", + "DropletActionSnapshot", +] + + +class DropletAction(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + +class DropletActionEnableBackups(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + backup_policy: DropletBackupPolicyParam + """An object specifying the backup policy for the Droplet. 
+ + If omitted, the backup plan will default to daily. + """ + + +class DropletActionChangeBackupPolicy(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + backup_policy: DropletBackupPolicyParam + """An object specifying the backup policy for the Droplet.""" + + +class DropletActionRestore(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + image: int + """The ID of a backup of the current Droplet instance to restore from.""" + + +class DropletActionResize(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + disk: bool + """When `true`, the Droplet's disk will be resized in addition to its RAM and CPU. + + This is a permanent change and cannot be reversed as a Droplet's disk size + cannot be decreased. 
+ """ + + size: str + """The slug identifier for the size to which you wish to resize the Droplet.""" + + +class DropletActionRebuild(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + image: Union[str, int] + """ + The image ID of a public or private image or the slug identifier for a public + image. The Droplet will be rebuilt using this image as its base. + """ + + +class DropletActionRename(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + name: str + """The new name for the Droplet.""" + + +class DropletActionChangeKernel(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + kernel: int + """A unique number used to identify and reference a specific kernel.""" + + +class DropletActionSnapshot(TypedDict, total=False): + type: Required[ + Literal[ + "enable_backups", + "disable_backups", + "reboot", + "power_cycle", + "shutdown", + "power_off", + "power_on", + "restore", + "password_reset", + "resize", + "rebuild", + "rename", + "change_kernel", + "enable_ipv6", + "snapshot", + ] + ] + """The type of action to initiate for the Droplet.""" + + name: str + """The name to 
give the new snapshot of the Droplet.""" + + +ActionInitiateParams: TypeAlias = Union[ + DropletAction, + DropletActionEnableBackups, + DropletActionChangeBackupPolicy, + DropletActionRestore, + DropletActionResize, + DropletActionRebuild, + DropletActionRename, + DropletActionChangeKernel, + DropletActionSnapshot, +] diff --git a/src/gradientai/types/gpu_droplets/action_initiate_response.py b/src/gradientai/types/gpu_droplets/action_initiate_response.py new file mode 100644 index 00000000..087781d1 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/action_initiate_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from ..shared.action import Action + +__all__ = ["ActionInitiateResponse"] + + +class ActionInitiateResponse(BaseModel): + action: Optional[Action] = None diff --git a/src/gradientai/types/gpu_droplets/action_list_params.py b/src/gradientai/types/gpu_droplets/action_list_params.py new file mode 100644 index 00000000..dd873288 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/action_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["ActionListParams"] + + +class ActionListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/action_list_response.py b/src/gradientai/types/gpu_droplets/action_list_response.py new file mode 100644 index 00000000..1a20f780 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/action_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from ..._models import BaseModel +from ..shared.action import Action +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["ActionListResponse"] + + +class ActionListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + actions: Optional[List[Action]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/action_retrieve_response.py b/src/gradientai/types/gpu_droplets/action_retrieve_response.py new file mode 100644 index 00000000..3856228d --- /dev/null +++ b/src/gradientai/types/gpu_droplets/action_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from ..shared.action import Action + +__all__ = ["ActionRetrieveResponse"] + + +class ActionRetrieveResponse(BaseModel): + action: Optional[Action] = None diff --git a/src/gradientai/types/gpu_droplets/associated_resource.py b/src/gradientai/types/gpu_droplets/associated_resource.py new file mode 100644 index 00000000..f72c3d32 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/associated_resource.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["AssociatedResource"] + + +class AssociatedResource(BaseModel): + id: Optional[str] = None + """The unique identifier for the resource associated with the Droplet.""" + + cost: Optional[str] = None + """ + The cost of the resource in USD per month if the resource is retained after the + Droplet is destroyed. 
+ """ + + name: Optional[str] = None + """The name of the resource associated with the Droplet.""" diff --git a/src/gradientai/types/gpu_droplets/autoscale_create_params.py b/src/gradientai/types/gpu_droplets/autoscale_create_params.py new file mode 100644 index 00000000..0f3c05a6 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_create_params.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Required, TypeAlias, TypedDict + +from .autoscale_pool_static_config_param import AutoscalePoolStaticConfigParam +from .autoscale_pool_dynamic_config_param import AutoscalePoolDynamicConfigParam +from .autoscale_pool_droplet_template_param import AutoscalePoolDropletTemplateParam + +__all__ = ["AutoscaleCreateParams", "Config"] + + +class AutoscaleCreateParams(TypedDict, total=False): + config: Required[Config] + """ + The scaling configuration for an autoscale pool, which is how the pool scales up + and down (either by resource utilization or static configuration). + """ + + droplet_template: Required[AutoscalePoolDropletTemplateParam] + + name: Required[str] + """The human-readable name of the autoscale pool. This field cannot be updated""" + + +Config: TypeAlias = Union[AutoscalePoolStaticConfigParam, AutoscalePoolDynamicConfigParam] diff --git a/src/gradientai/types/gpu_droplets/autoscale_create_response.py b/src/gradientai/types/gpu_droplets/autoscale_create_response.py new file mode 100644 index 00000000..819297e9 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel +from .autoscale_pool import AutoscalePool + +__all__ = ["AutoscaleCreateResponse"] + + +class AutoscaleCreateResponse(BaseModel): + autoscale_pool: Optional[AutoscalePool] = None diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_history_params.py b/src/gradientai/types/gpu_droplets/autoscale_list_history_params.py new file mode 100644 index 00000000..f837a11e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_list_history_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["AutoscaleListHistoryParams"] + + +class AutoscaleListHistoryParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_history_response.py b/src/gradientai/types/gpu_droplets/autoscale_list_history_response.py new file mode 100644 index 00000000..843f44d8 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_list_history_response.py @@ -0,0 +1,48 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["AutoscaleListHistoryResponse", "History"] + + +class History(BaseModel): + created_at: datetime + """ + The creation time of the history event in ISO8601 combined date and time format. 
+ """ + + current_instance_count: int + """The current number of Droplets in the autoscale pool.""" + + desired_instance_count: int + """The target number of Droplets for the autoscale pool after the scaling event.""" + + history_event_id: str + """The unique identifier of the history event.""" + + reason: Literal["CONFIGURATION_CHANGE", "SCALE_UP", "SCALE_DOWN"] + """The reason for the scaling event.""" + + status: Literal["in_progress", "success", "error"] + """The status of the scaling event.""" + + updated_at: datetime + """ + The last updated time of the history event in ISO8601 combined date and time + format. + """ + + +class AutoscaleListHistoryResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + history: Optional[List[History]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_members_params.py b/src/gradientai/types/gpu_droplets/autoscale_list_members_params.py new file mode 100644 index 00000000..5a7f738d --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_list_members_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["AutoscaleListMembersParams"] + + +class AutoscaleListMembersParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_members_response.py b/src/gradientai/types/gpu_droplets/autoscale_list_members_response.py new file mode 100644 index 00000000..337ac4e3 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_list_members_response.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["AutoscaleListMembersResponse", "Droplet", "DropletCurrentUtilization"] + + +class DropletCurrentUtilization(BaseModel): + cpu: Optional[float] = None + """The CPU utilization average of the individual Droplet.""" + + memory: Optional[float] = None + """The memory utilization average of the individual Droplet.""" + + +class Droplet(BaseModel): + created_at: datetime + """The creation time of the Droplet in ISO8601 combined date and time format.""" + + current_utilization: DropletCurrentUtilization + + droplet_id: int + """The unique identifier of the Droplet.""" + + health_status: str + """The health status of the Droplet.""" + + status: Literal["provisioning", "active", "deleting", "off"] + """The power status of the Droplet.""" + + updated_at: datetime + """The last updated time of the Droplet in ISO8601 combined date and time format.""" + + +class AutoscaleListMembersResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + droplets: Optional[List[Droplet]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_params.py b/src/gradientai/types/gpu_droplets/autoscale_list_params.py new file mode 100644 index 00000000..3a35e616 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_list_params.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["AutoscaleListParams"] + + +class AutoscaleListParams(TypedDict, total=False): + name: str + """The name of the autoscale pool""" + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_response.py b/src/gradientai/types/gpu_droplets/autoscale_list_response.py new file mode 100644 index 00000000..807cb17f --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from .autoscale_pool import AutoscalePool +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["AutoscaleListResponse"] + + +class AutoscaleListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + autoscale_pools: Optional[List[AutoscalePool]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool.py b/src/gradientai/types/gpu_droplets/autoscale_pool.py new file mode 100644 index 00000000..2964319e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_pool.py @@ -0,0 +1,54 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Union, Optional +from datetime import datetime +from typing_extensions import Literal, TypeAlias + +from ..._models import BaseModel +from .current_utilization import CurrentUtilization +from .autoscale_pool_static_config import AutoscalePoolStaticConfig +from .autoscale_pool_dynamic_config import AutoscalePoolDynamicConfig +from .autoscale_pool_droplet_template import AutoscalePoolDropletTemplate + +__all__ = ["AutoscalePool", "Config"] + +Config: TypeAlias = Union[AutoscalePoolStaticConfig, AutoscalePoolDynamicConfig] + + +class AutoscalePool(BaseModel): + id: str + """A unique identifier for each autoscale pool instance. + + This is automatically generated upon autoscale pool creation. + """ + + active_resources_count: int + """The number of active Droplets in the autoscale pool.""" + + config: Config + """ + The scaling configuration for an autoscale pool, which is how the pool scales up + and down (either by resource utilization or static configuration). + """ + + created_at: datetime + """ + A time value given in ISO8601 combined date and time format that represents when + the autoscale pool was created. + """ + + droplet_template: AutoscalePoolDropletTemplate + + name: str + """The human-readable name set for the autoscale pool.""" + + status: Literal["active", "deleting", "error"] + """The current status of the autoscale pool.""" + + updated_at: datetime + """ + A time value given in ISO8601 combined date and time format that represents when + the autoscale pool was last updated. + """ + + current_utilization: Optional[CurrentUtilization] = None diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py b/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py new file mode 100644 index 00000000..2ab2036b --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py @@ -0,0 +1,69 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["AutoscalePoolDropletTemplate"] + + +class AutoscalePoolDropletTemplate(BaseModel): + image: str + """The Droplet image to be used for all Droplets in the autoscale pool. + + You may specify the slug or the image ID. + """ + + region: Literal[ + "nyc1", "nyc2", "nyc3", "ams2", "ams3", "sfo1", "sfo2", "sfo3", "sgp1", "lon1", "fra1", "tor1", "blr1", "syd1" + ] + """The datacenter in which all of the Droplets will be created.""" + + size: str + """The Droplet size to be used for all Droplets in the autoscale pool.""" + + ssh_keys: List[str] + """The SSH keys to be installed on the Droplets in the autoscale pool. + + You can either specify the key ID or the fingerprint. Requires `ssh_key:read` + scope. + """ + + ipv6: Optional[bool] = None + """Assigns a unique IPv6 address to each of the Droplets in the autoscale pool.""" + + name: Optional[str] = None + """The name(s) to be applied to all Droplets in the autoscale pool.""" + + project_id: Optional[str] = None + """ + The project that the Droplets in the autoscale pool will belong to. Requires + `project:read` scope. + """ + + tags: Optional[List[str]] = None + """ + The tags to apply to each of the Droplets in the autoscale pool. Requires + `tag:read` scope. + """ + + user_data: Optional[str] = None + """ + A string containing user data that cloud-init consumes to configure a Droplet on + first boot. User data is often a cloud-config file or Bash script. It must be + plain text and may not exceed 64 KiB in size. + """ + + vpc_uuid: Optional[str] = None + """The VPC where the Droplets in the autoscale pool will be created. + + The VPC must be in the region where you want to create the Droplets. Requires + `vpc:read` scope. + """ + + with_droplet_agent: Optional[bool] = None + """Installs the Droplet agent. + + This must be set to true to monitor Droplets for resource utilization scaling. 
+ """ diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py b/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py new file mode 100644 index 00000000..c491ed55 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py @@ -0,0 +1,84 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["AutoscalePoolDropletTemplateParam"] + + +class AutoscalePoolDropletTemplateParam(TypedDict, total=False): + image: Required[str] + """The Droplet image to be used for all Droplets in the autoscale pool. + + You may specify the slug or the image ID. + """ + + region: Required[ + Literal[ + "nyc1", + "nyc2", + "nyc3", + "ams2", + "ams3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "lon1", + "fra1", + "tor1", + "blr1", + "syd1", + ] + ] + """The datacenter in which all of the Droplets will be created.""" + + size: Required[str] + """The Droplet size to be used for all Droplets in the autoscale pool.""" + + ssh_keys: Required[List[str]] + """The SSH keys to be installed on the Droplets in the autoscale pool. + + You can either specify the key ID or the fingerprint. Requires `ssh_key:read` + scope. + """ + + ipv6: bool + """Assigns a unique IPv6 address to each of the Droplets in the autoscale pool.""" + + name: str + """The name(s) to be applied to all Droplets in the autoscale pool.""" + + project_id: str + """ + The project that the Droplets in the autoscale pool will belong to. Requires + `project:read` scope. + """ + + tags: List[str] + """ + The tags to apply to each of the Droplets in the autoscale pool. Requires + `tag:read` scope. + """ + + user_data: str + """ + A string containing user data that cloud-init consumes to configure a Droplet on + first boot. User data is often a cloud-config file or Bash script. 
It must be + plain text and may not exceed 64 KiB in size. + """ + + vpc_uuid: str + """The VPC where the Droplets in the autoscale pool will be created. + + The VPC must be in the region where you want to create the Droplets. Requires + `vpc:read` scope. + """ + + with_droplet_agent: bool + """Installs the Droplet agent. + + This must be set to true to monitor Droplets for resource utilization scaling. + """ diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py b/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py new file mode 100644 index 00000000..10f9781b --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["AutoscalePoolDynamicConfig"] + + +class AutoscalePoolDynamicConfig(BaseModel): + max_instances: int + """The maximum number of Droplets in an autoscale pool.""" + + min_instances: int + """The minimum number of Droplets in an autoscale pool.""" + + cooldown_minutes: Optional[int] = None + """The number of minutes to wait between scaling events in an autoscale pool. + + Defaults to 10 minutes. + """ + + target_cpu_utilization: Optional[float] = None + """Target CPU utilization as a decimal.""" + + target_memory_utilization: Optional[float] = None + """Target memory utilization as a decimal.""" diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py b/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py new file mode 100644 index 00000000..af06e73a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["AutoscalePoolDynamicConfigParam"] + + +class AutoscalePoolDynamicConfigParam(TypedDict, total=False): + max_instances: Required[int] + """The maximum number of Droplets in an autoscale pool.""" + + min_instances: Required[int] + """The minimum number of Droplets in an autoscale pool.""" + + cooldown_minutes: int + """The number of minutes to wait between scaling events in an autoscale pool. + + Defaults to 10 minutes. + """ + + target_cpu_utilization: float + """Target CPU utilization as a decimal.""" + + target_memory_utilization: float + """Target memory utilization as a decimal.""" diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_static_config.py b/src/gradientai/types/gpu_droplets/autoscale_pool_static_config.py new file mode 100644 index 00000000..cc891007 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_pool_static_config.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from ..._models import BaseModel + +__all__ = ["AutoscalePoolStaticConfig"] + + +class AutoscalePoolStaticConfig(BaseModel): + target_number_instances: int + """Fixed number of instances in an autoscale pool.""" diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py b/src/gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py new file mode 100644 index 00000000..a7510d22 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["AutoscalePoolStaticConfigParam"] + + +class AutoscalePoolStaticConfigParam(TypedDict, total=False): + target_number_instances: Required[int] + """Fixed number of instances in an autoscale pool.""" diff --git a/src/gradientai/types/gpu_droplets/autoscale_retrieve_response.py b/src/gradientai/types/gpu_droplets/autoscale_retrieve_response.py new file mode 100644 index 00000000..f383ed03 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .autoscale_pool import AutoscalePool + +__all__ = ["AutoscaleRetrieveResponse"] + + +class AutoscaleRetrieveResponse(BaseModel): + autoscale_pool: Optional[AutoscalePool] = None diff --git a/src/gradientai/types/gpu_droplets/autoscale_update_params.py b/src/gradientai/types/gpu_droplets/autoscale_update_params.py new file mode 100644 index 00000000..1b96af1e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_update_params.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Required, TypeAlias, TypedDict + +from .autoscale_pool_static_config_param import AutoscalePoolStaticConfigParam +from .autoscale_pool_dynamic_config_param import AutoscalePoolDynamicConfigParam +from .autoscale_pool_droplet_template_param import AutoscalePoolDropletTemplateParam + +__all__ = ["AutoscaleUpdateParams", "Config"] + + +class AutoscaleUpdateParams(TypedDict, total=False): + config: Required[Config] + """ + The scaling configuration for an autoscale pool, which is how the pool scales up + and down (either by resource utilization or static configuration). 
+ """ + + droplet_template: Required[AutoscalePoolDropletTemplateParam] + + name: Required[str] + """The human-readable name of the autoscale pool. This field cannot be updated""" + + +Config: TypeAlias = Union[AutoscalePoolStaticConfigParam, AutoscalePoolDynamicConfigParam] diff --git a/src/gradientai/types/gpu_droplets/autoscale_update_response.py b/src/gradientai/types/gpu_droplets/autoscale_update_response.py new file mode 100644 index 00000000..09dde2a4 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/autoscale_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .autoscale_pool import AutoscalePool + +__all__ = ["AutoscaleUpdateResponse"] + + +class AutoscaleUpdateResponse(BaseModel): + autoscale_pool: Optional[AutoscalePool] = None diff --git a/src/gradientai/types/gpu_droplets/backup_list_params.py b/src/gradientai/types/gpu_droplets/backup_list_params.py new file mode 100644 index 00000000..66fe92aa --- /dev/null +++ b/src/gradientai/types/gpu_droplets/backup_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["BackupListParams"] + + +class BackupListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/backup_list_policies_params.py b/src/gradientai/types/gpu_droplets/backup_list_policies_params.py new file mode 100644 index 00000000..0cdb0ddb --- /dev/null +++ b/src/gradientai/types/gpu_droplets/backup_list_policies_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["BackupListPoliciesParams"] + + +class BackupListPoliciesParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/backup_list_policies_response.py b/src/gradientai/types/gpu_droplets/backup_list_policies_response.py new file mode 100644 index 00000000..73aa9458 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/backup_list_policies_response.py @@ -0,0 +1,41 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, Optional + +from ..._models import BaseModel +from ..shared.page_links import PageLinks +from ..droplet_backup_policy import DropletBackupPolicy +from ..shared.meta_properties import MetaProperties +from ..shared.droplet_next_backup_window import DropletNextBackupWindow + +__all__ = ["BackupListPoliciesResponse", "Policies"] + + +class Policies(BaseModel): + backup_enabled: Optional[bool] = None + """A boolean value indicating whether backups are enabled for the Droplet.""" + + backup_policy: Optional[DropletBackupPolicy] = None + """An object specifying the backup policy for the Droplet.""" + + droplet_id: Optional[int] = None + """The unique identifier for the Droplet.""" + + next_backup_window: Optional[DropletNextBackupWindow] = None + """ + An object containing keys with the start and end times of the window during + which the backup will occur. + """ + + +class BackupListPoliciesResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + links: Optional[PageLinks] = None + + policies: Optional[Dict[str, Policies]] = None + """ + A map where the keys are the Droplet IDs and the values are objects containing + the backup policy information for each Droplet. 
+ """ diff --git a/src/gradientai/types/gpu_droplets/backup_list_response.py b/src/gradientai/types/gpu_droplets/backup_list_response.py new file mode 100644 index 00000000..c96d573a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/backup_list_response.py @@ -0,0 +1,53 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["BackupListResponse", "Backup"] + + +class Backup(BaseModel): + id: int + """The unique identifier for the snapshot or backup.""" + + created_at: datetime + """ + A time value given in ISO8601 combined date and time format that represents when + the snapshot was created. + """ + + min_disk_size: int + """The minimum size in GB required for a volume or Droplet to use this snapshot.""" + + name: str + """A human-readable name for the snapshot.""" + + regions: List[str] + """An array of the regions that the snapshot is available in. + + The regions are represented by their identifying slug values. + """ + + size_gigabytes: float + """The billable size of the snapshot in gigabytes.""" + + type: Literal["snapshot", "backup"] + """Describes the kind of image. + + It may be one of `snapshot` or `backup`. This specifies whether an image is a + user-generated Droplet snapshot or automatically created Droplet backup. 
+ """ + + +class BackupListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + backups: Optional[List[Backup]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/backup_list_supported_policies_response.py b/src/gradientai/types/gpu_droplets/backup_list_supported_policies_response.py new file mode 100644 index 00000000..219cfc34 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/backup_list_supported_policies_response.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel + +__all__ = ["BackupListSupportedPoliciesResponse", "SupportedPolicy"] + + +class SupportedPolicy(BaseModel): + name: Optional[str] = None + """The name of the Droplet backup plan.""" + + possible_days: Optional[List[str]] = None + """The day of the week the backup will occur.""" + + possible_window_starts: Optional[List[int]] = None + """An array of integers representing the hours of the day that a backup can start.""" + + retention_period_days: Optional[int] = None + """The number of days that a backup will be kept.""" + + window_length_hours: Optional[int] = None + """The number of hours that a backup window is open.""" + + +class BackupListSupportedPoliciesResponse(BaseModel): + supported_policies: Optional[List[SupportedPolicy]] = None diff --git a/src/gradientai/types/gpu_droplets/backup_retrieve_policy_response.py b/src/gradientai/types/gpu_droplets/backup_retrieve_policy_response.py new file mode 100644 index 00000000..38288dea --- /dev/null +++ b/src/gradientai/types/gpu_droplets/backup_retrieve_policy_response.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel +from ..droplet_backup_policy import DropletBackupPolicy +from ..shared.droplet_next_backup_window import DropletNextBackupWindow + +__all__ = ["BackupRetrievePolicyResponse", "Policy"] + + +class Policy(BaseModel): + backup_enabled: Optional[bool] = None + """A boolean value indicating whether backups are enabled for the Droplet.""" + + backup_policy: Optional[DropletBackupPolicy] = None + """An object specifying the backup policy for the Droplet.""" + + droplet_id: Optional[int] = None + """The unique identifier for the Droplet.""" + + next_backup_window: Optional[DropletNextBackupWindow] = None + """ + An object containing keys with the start and end times of the window during + which the backup will occur. + """ + + +class BackupRetrievePolicyResponse(BaseModel): + policy: Optional[Policy] = None diff --git a/src/gradientai/types/gpu_droplets/current_utilization.py b/src/gradientai/types/gpu_droplets/current_utilization.py new file mode 100644 index 00000000..f2cb0b6c --- /dev/null +++ b/src/gradientai/types/gpu_droplets/current_utilization.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["CurrentUtilization"] + + +class CurrentUtilization(BaseModel): + cpu: Optional[float] = None + """The average CPU utilization of the autoscale pool.""" + + memory: Optional[float] = None + """The average memory utilization of the autoscale pool.""" diff --git a/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py b/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py new file mode 100644 index 00000000..f2f2ff67 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py @@ -0,0 +1,41 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime + +from ..._models import BaseModel +from .destroyed_associated_resource import DestroyedAssociatedResource + +__all__ = ["DestroyWithAssociatedResourceCheckStatusResponse", "Resources"] + + +class Resources(BaseModel): + floating_ips: Optional[List[DestroyedAssociatedResource]] = None + + reserved_ips: Optional[List[DestroyedAssociatedResource]] = None + + snapshots: Optional[List[DestroyedAssociatedResource]] = None + + volume_snapshots: Optional[List[DestroyedAssociatedResource]] = None + + volumes: Optional[List[DestroyedAssociatedResource]] = None + + +class DestroyWithAssociatedResourceCheckStatusResponse(BaseModel): + completed_at: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format indicating when the + requested action was completed. + """ + + droplet: Optional[DestroyedAssociatedResource] = None + """An object containing information about a resource scheduled for deletion.""" + + failures: Optional[int] = None + """A count of the associated resources that failed to be destroyed, if any.""" + + resources: Optional[Resources] = None + """ + An object containing additional information about resource related to a Droplet + requested to be destroyed. + """ diff --git a/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py b/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py new file mode 100644 index 00000000..f4037b6b --- /dev/null +++ b/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List +from typing_extensions import TypedDict + +__all__ = ["DestroyWithAssociatedResourceDeleteSelectiveParams"] + + +class DestroyWithAssociatedResourceDeleteSelectiveParams(TypedDict, total=False): + floating_ips: List[str] + """ + An array of unique identifiers for the floating IPs to be scheduled for + deletion. + """ + + reserved_ips: List[str] + """ + An array of unique identifiers for the reserved IPs to be scheduled for + deletion. + """ + + snapshots: List[str] + """An array of unique identifiers for the snapshots to be scheduled for deletion.""" + + volume_snapshots: List[str] + """ + An array of unique identifiers for the volume snapshots to be scheduled for + deletion. + """ + + volumes: List[str] + """An array of unique identifiers for the volumes to be scheduled for deletion.""" diff --git a/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py b/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py new file mode 100644 index 00000000..ef4c6c99 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py @@ -0,0 +1,37 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from .associated_resource import AssociatedResource + +__all__ = ["DestroyWithAssociatedResourceListResponse"] + + +class DestroyWithAssociatedResourceListResponse(BaseModel): + floating_ips: Optional[List[AssociatedResource]] = None + """ + Floating IPs that are associated with this Droplet. Requires `reserved_ip:read` + scope. + """ + + reserved_ips: Optional[List[AssociatedResource]] = None + """ + Reserved IPs that are associated with this Droplet. Requires `reserved_ip:read` + scope. + """ + + snapshots: Optional[List[AssociatedResource]] = None + """Snapshots that are associated with this Droplet. 
Requires `image:read` scope.""" + + volume_snapshots: Optional[List[AssociatedResource]] = None + """ + Volume Snapshots that are associated with this Droplet. Requires + `block_storage_snapshot:read` scope. + """ + + volumes: Optional[List[AssociatedResource]] = None + """ + Volumes that are associated with this Droplet. Requires `block_storage:read` + scope. + """ diff --git a/src/gradientai/types/gpu_droplets/destroyed_associated_resource.py b/src/gradientai/types/gpu_droplets/destroyed_associated_resource.py new file mode 100644 index 00000000..358c14e9 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/destroyed_associated_resource.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime + +from ..._models import BaseModel + +__all__ = ["DestroyedAssociatedResource"] + + +class DestroyedAssociatedResource(BaseModel): + id: Optional[str] = None + """The unique identifier for the resource scheduled for deletion.""" + + destroyed_at: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format indicating when the + resource was destroyed if the request was successful. + """ + + error_message: Optional[str] = None + """ + A string indicating that the resource was not successfully destroyed and + providing additional information. + """ + + name: Optional[str] = None + """The name of the resource scheduled for deletion.""" diff --git a/src/gradientai/types/gpu_droplets/domains.py b/src/gradientai/types/gpu_droplets/domains.py new file mode 100644 index 00000000..6a9400f9 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/domains.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["Domains"] + + +class Domains(BaseModel): + certificate_id: Optional[str] = None + """The ID of the TLS certificate used for SSL termination.""" + + is_managed: Optional[bool] = None + """A boolean value indicating if the domain is already managed by DigitalOcean. + + If true, all A and AAAA records required to enable Global load balancers will be + automatically added. + """ + + name: Optional[str] = None + """FQDN to associate with a Global load balancer.""" diff --git a/src/gradientai/types/gpu_droplets/domains_param.py b/src/gradientai/types/gpu_droplets/domains_param.py new file mode 100644 index 00000000..d2d21faf --- /dev/null +++ b/src/gradientai/types/gpu_droplets/domains_param.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["DomainsParam"] + + +class DomainsParam(TypedDict, total=False): + certificate_id: str + """The ID of the TLS certificate used for SSL termination.""" + + is_managed: bool + """A boolean value indicating if the domain is already managed by DigitalOcean. + + If true, all A and AAAA records required to enable Global load balancers will be + automatically added. + """ + + name: str + """FQDN to associate with a Global load balancer.""" diff --git a/src/gradientai/types/gpu_droplets/firewall.py b/src/gradientai/types/gpu_droplets/firewall.py new file mode 100644 index 00000000..0eb352a1 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewall.py @@ -0,0 +1,98 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel +from ..shared.firewall_rule_target import FirewallRuleTarget + +__all__ = ["Firewall", "InboundRule", "OutboundRule", "PendingChange"] + + +class InboundRule(BaseModel): + ports: str + """ + The ports on which traffic will be allowed specified as a string containing a + single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a + protocol. For ICMP rules this parameter will always return "0". + """ + + protocol: Literal["tcp", "udp", "icmp"] + """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`.""" + + sources: FirewallRuleTarget + """An object specifying locations from which inbound traffic will be accepted.""" + + +class OutboundRule(BaseModel): + destinations: FirewallRuleTarget + """An object specifying locations to which outbound traffic that will be allowed.""" + + ports: str + """ + The ports on which traffic will be allowed specified as a string containing a + single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a + protocol. For ICMP rules this parameter will always return "0". + """ + + protocol: Literal["tcp", "udp", "icmp"] + """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`.""" + + +class PendingChange(BaseModel): + droplet_id: Optional[int] = None + + removing: Optional[bool] = None + + status: Optional[str] = None + + +class Firewall(BaseModel): + id: Optional[str] = None + """A unique ID that can be used to identify and reference a firewall.""" + + created_at: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format that represents when + the firewall was created. + """ + + droplet_ids: Optional[List[int]] = None + """An array containing the IDs of the Droplets assigned to the firewall. + + Requires `droplet:read` scope. 
+ """ + + inbound_rules: Optional[List[InboundRule]] = None + + name: Optional[str] = None + """A human-readable name for a firewall. + + The name must begin with an alphanumeric character. Subsequent characters must + either be alphanumeric characters, a period (.), or a dash (-). + """ + + outbound_rules: Optional[List[OutboundRule]] = None + + pending_changes: Optional[List[PendingChange]] = None + """ + An array of objects each containing the fields "droplet_id", "removing", and + "status". It is provided to detail exactly which Droplets are having their + security policies updated. When empty, all changes have been successfully + applied. + """ + + status: Optional[Literal["waiting", "succeeded", "failed"]] = None + """A status string indicating the current state of the firewall. + + This can be "waiting", "succeeded", or "failed". + """ + + tags: Optional[List[str]] = None + """A flat array of tag names as strings to be applied to the resource. + + Tag names must exist in order to be referenced in a request. + + Requires `tag:create` and `tag:read` scopes. + """ diff --git a/src/gradientai/types/gpu_droplets/firewall_create_params.py b/src/gradientai/types/gpu_droplets/firewall_create_params.py new file mode 100644 index 00000000..b10ae98e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewall_create_params.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +from .firewall_param import FirewallParam + +__all__ = ["FirewallCreateParams", "Body"] + + +class FirewallCreateParams(TypedDict, total=False): + body: Body + + +class Body(FirewallParam, total=False): + pass diff --git a/src/gradientai/types/gpu_droplets/firewall_create_response.py b/src/gradientai/types/gpu_droplets/firewall_create_response.py new file mode 100644 index 00000000..be30113a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewall_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from .firewall import Firewall +from ..._models import BaseModel + +__all__ = ["FirewallCreateResponse"] + + +class FirewallCreateResponse(BaseModel): + firewall: Optional[Firewall] = None diff --git a/src/gradientai/types/gpu_droplets/firewall_list_params.py b/src/gradientai/types/gpu_droplets/firewall_list_params.py new file mode 100644 index 00000000..155cc480 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewall_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["FirewallListParams"] + + +class FirewallListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/firewall_list_response.py b/src/gradientai/types/gpu_droplets/firewall_list_response.py new file mode 100644 index 00000000..ec0af688 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewall_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from .firewall import Firewall +from ..._models import BaseModel +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["FirewallListResponse"] + + +class FirewallListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + firewalls: Optional[List[Firewall]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/firewall_param.py b/src/gradientai/types/gpu_droplets/firewall_param.py new file mode 100644 index 00000000..1be9cf6a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewall_param.py @@ -0,0 +1,67 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Iterable, Optional +from typing_extensions import Literal, Required, TypedDict + +from ..shared_params.firewall_rule_target import FirewallRuleTarget + +__all__ = ["FirewallParam", "InboundRule", "OutboundRule"] + + +class InboundRule(TypedDict, total=False): + ports: Required[str] + """ + The ports on which traffic will be allowed specified as a string containing a + single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a + protocol. For ICMP rules this parameter will always return "0". + """ + + protocol: Required[Literal["tcp", "udp", "icmp"]] + """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`.""" + + sources: Required[FirewallRuleTarget] + """An object specifying locations from which inbound traffic will be accepted.""" + + +class OutboundRule(TypedDict, total=False): + destinations: Required[FirewallRuleTarget] + """An object specifying locations to which outbound traffic that will be allowed.""" + + ports: Required[str] + """ + The ports on which traffic will be allowed specified as a string containing a + single port, a range (e.g. 
"8000-9000"), or "0" when all ports are open for a + protocol. For ICMP rules this parameter will always return "0". + """ + + protocol: Required[Literal["tcp", "udp", "icmp"]] + """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`.""" + + +class FirewallParam(TypedDict, total=False): + droplet_ids: Optional[Iterable[int]] + """An array containing the IDs of the Droplets assigned to the firewall. + + Requires `droplet:read` scope. + """ + + inbound_rules: Optional[Iterable[InboundRule]] + + name: str + """A human-readable name for a firewall. + + The name must begin with an alphanumeric character. Subsequent characters must + either be alphanumeric characters, a period (.), or a dash (-). + """ + + outbound_rules: Optional[Iterable[OutboundRule]] + + tags: Optional[List[str]] + """A flat array of tag names as strings to be applied to the resource. + + Tag names must exist in order to be referenced in a request. + + Requires `tag:create` and `tag:read` scopes. + """ diff --git a/src/gradientai/types/gpu_droplets/firewall_retrieve_response.py b/src/gradientai/types/gpu_droplets/firewall_retrieve_response.py new file mode 100644 index 00000000..bb29a174 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewall_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from .firewall import Firewall +from ..._models import BaseModel + +__all__ = ["FirewallRetrieveResponse"] + + +class FirewallRetrieveResponse(BaseModel): + firewall: Optional[Firewall] = None diff --git a/src/gradientai/types/gpu_droplets/firewall_update_params.py b/src/gradientai/types/gpu_droplets/firewall_update_params.py new file mode 100644 index 00000000..c2d0691d --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewall_update_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +from .firewall_param import FirewallParam + +__all__ = ["FirewallUpdateParams"] + + +class FirewallUpdateParams(TypedDict, total=False): + firewall: Required[FirewallParam] diff --git a/src/gradientai/types/gpu_droplets/firewall_update_response.py b/src/gradientai/types/gpu_droplets/firewall_update_response.py new file mode 100644 index 00000000..cb8ff702 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewall_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from .firewall import Firewall +from ..._models import BaseModel + +__all__ = ["FirewallUpdateResponse"] + + +class FirewallUpdateResponse(BaseModel): + firewall: Optional[Firewall] = None diff --git a/src/gradientai/types/gpu_droplets/firewalls/__init__.py b/src/gradientai/types/gpu_droplets/firewalls/__init__.py new file mode 100644 index 00000000..6ba459d9 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewalls/__init__.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .tag_add_params import TagAddParams as TagAddParams +from .rule_add_params import RuleAddParams as RuleAddParams +from .tag_remove_params import TagRemoveParams as TagRemoveParams +from .droplet_add_params import DropletAddParams as DropletAddParams +from .rule_remove_params import RuleRemoveParams as RuleRemoveParams +from .droplet_remove_params import DropletRemoveParams as DropletRemoveParams diff --git a/src/gradientai/types/gpu_droplets/firewalls/droplet_add_params.py b/src/gradientai/types/gpu_droplets/firewalls/droplet_add_params.py new file mode 100644 index 00000000..35a403a5 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewalls/droplet_add_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Required, TypedDict + +__all__ = ["DropletAddParams"] + + +class DropletAddParams(TypedDict, total=False): + droplet_ids: Required[Iterable[int]] + """An array containing the IDs of the Droplets to be assigned to the firewall.""" diff --git a/src/gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py b/src/gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py new file mode 100644 index 00000000..5aea18e8 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Required, TypedDict + +__all__ = ["DropletRemoveParams"] + + +class DropletRemoveParams(TypedDict, total=False): + droplet_ids: Required[Iterable[int]] + """An array containing the IDs of the Droplets to be removed from the firewall.""" diff --git a/src/gradientai/types/gpu_droplets/firewalls/rule_add_params.py b/src/gradientai/types/gpu_droplets/firewalls/rule_add_params.py new file mode 100644 index 00000000..1f49e55a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewalls/rule_add_params.py @@ -0,0 +1,46 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable, Optional +from typing_extensions import Literal, Required, TypedDict + +from ...shared_params.firewall_rule_target import FirewallRuleTarget + +__all__ = ["RuleAddParams", "InboundRule", "OutboundRule"] + + +class RuleAddParams(TypedDict, total=False): + inbound_rules: Optional[Iterable[InboundRule]] + + outbound_rules: Optional[Iterable[OutboundRule]] + + +class InboundRule(TypedDict, total=False): + ports: Required[str] + """ + The ports on which traffic will be allowed specified as a string containing a + single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a + protocol. For ICMP rules this parameter will always return "0". + """ + + protocol: Required[Literal["tcp", "udp", "icmp"]] + """The type of traffic to be allowed. 
This may be one of `tcp`, `udp`, or `icmp`.""" + + sources: Required[FirewallRuleTarget] + """An object specifying locations from which inbound traffic will be accepted.""" + + +class OutboundRule(TypedDict, total=False): + destinations: Required[FirewallRuleTarget] + """An object specifying locations to which outbound traffic that will be allowed.""" + + ports: Required[str] + """ + The ports on which traffic will be allowed specified as a string containing a + single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a + protocol. For ICMP rules this parameter will always return "0". + """ + + protocol: Required[Literal["tcp", "udp", "icmp"]] + """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`.""" diff --git a/src/gradientai/types/gpu_droplets/firewalls/rule_remove_params.py b/src/gradientai/types/gpu_droplets/firewalls/rule_remove_params.py new file mode 100644 index 00000000..b6bb05df --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewalls/rule_remove_params.py @@ -0,0 +1,46 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable, Optional +from typing_extensions import Literal, Required, TypedDict + +from ...shared_params.firewall_rule_target import FirewallRuleTarget + +__all__ = ["RuleRemoveParams", "InboundRule", "OutboundRule"] + + +class RuleRemoveParams(TypedDict, total=False): + inbound_rules: Optional[Iterable[InboundRule]] + + outbound_rules: Optional[Iterable[OutboundRule]] + + +class InboundRule(TypedDict, total=False): + ports: Required[str] + """ + The ports on which traffic will be allowed specified as a string containing a + single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a + protocol. For ICMP rules this parameter will always return "0". + """ + + protocol: Required[Literal["tcp", "udp", "icmp"]] + """The type of traffic to be allowed. 
This may be one of `tcp`, `udp`, or `icmp`.""" + + sources: Required[FirewallRuleTarget] + """An object specifying locations from which inbound traffic will be accepted.""" + + +class OutboundRule(TypedDict, total=False): + destinations: Required[FirewallRuleTarget] + """An object specifying locations to which outbound traffic that will be allowed.""" + + ports: Required[str] + """ + The ports on which traffic will be allowed specified as a string containing a + single port, a range (e.g. "8000-9000"), or "0" when all ports are open for a + protocol. For ICMP rules this parameter will always return "0". + """ + + protocol: Required[Literal["tcp", "udp", "icmp"]] + """The type of traffic to be allowed. This may be one of `tcp`, `udp`, or `icmp`.""" diff --git a/src/gradientai/types/gpu_droplets/firewalls/tag_add_params.py b/src/gradientai/types/gpu_droplets/firewalls/tag_add_params.py new file mode 100644 index 00000000..63af7640 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewalls/tag_add_params.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional +from typing_extensions import Required, TypedDict + +__all__ = ["TagAddParams"] + + +class TagAddParams(TypedDict, total=False): + tags: Required[Optional[List[str]]] + """A flat array of tag names as strings to be applied to the resource. + + Tag names must exist in order to be referenced in a request. + + Requires `tag:create` and `tag:read` scopes. + """ diff --git a/src/gradientai/types/gpu_droplets/firewalls/tag_remove_params.py b/src/gradientai/types/gpu_droplets/firewalls/tag_remove_params.py new file mode 100644 index 00000000..91a3e382 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/firewalls/tag_remove_params.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Optional +from typing_extensions import Required, TypedDict + +__all__ = ["TagRemoveParams"] + + +class TagRemoveParams(TypedDict, total=False): + tags: Required[Optional[List[str]]] + """A flat array of tag names as strings to be applied to the resource. + + Tag names must exist in order to be referenced in a request. + + Requires `tag:create` and `tag:read` scopes. + """ diff --git a/src/gradientai/types/gpu_droplets/floating_ip.py b/src/gradientai/types/gpu_droplets/floating_ip.py new file mode 100644 index 00000000..81c58753 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ip.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union, Optional +from typing_extensions import TypeAlias + +from ..shared import region, droplet +from ..._models import BaseModel + +__all__ = ["FloatingIP", "Droplet", "Region"] + +Droplet: TypeAlias = Union[droplet.Droplet, Optional[object]] + + +class Region(region.Region): + pass + + +class FloatingIP(BaseModel): + droplet: Optional[Droplet] = None + """The Droplet that the floating IP has been assigned to. + + When you query a floating IP, if it is assigned to a Droplet, the entire Droplet + object will be returned. If it is not assigned, the value will be null. + + Requires `droplet:read` scope. + """ + + ip: Optional[str] = None + """The public IP address of the floating IP. It also serves as its identifier.""" + + locked: Optional[bool] = None + """ + A boolean value indicating whether or not the floating IP has pending actions + preventing new ones from being submitted. + """ + + project_id: Optional[str] = None + """The UUID of the project to which the reserved IP currently belongs. + + Requires `project:read` scope. + """ + + region: Optional[Region] = None + """The region that the floating IP is reserved to. 
+ + When you query a floating IP, the entire region object will be returned. + """ diff --git a/src/gradientai/types/gpu_droplets/floating_ip_create_params.py b/src/gradientai/types/gpu_droplets/floating_ip_create_params.py new file mode 100644 index 00000000..2adadc27 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ip_create_params.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Required, TypeAlias, TypedDict + +__all__ = ["FloatingIPCreateParams", "AssignToDroplet", "ReserveToRegion"] + + +class AssignToDroplet(TypedDict, total=False): + droplet_id: Required[int] + """The ID of the Droplet that the floating IP will be assigned to.""" + + +class ReserveToRegion(TypedDict, total=False): + region: Required[str] + """The slug identifier for the region the floating IP will be reserved to.""" + + project_id: str + """The UUID of the project to which the floating IP will be assigned.""" + + +FloatingIPCreateParams: TypeAlias = Union[AssignToDroplet, ReserveToRegion] diff --git a/src/gradientai/types/gpu_droplets/floating_ip_create_response.py b/src/gradientai/types/gpu_droplets/floating_ip_create_response.py new file mode 100644 index 00000000..04668b84 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ip_create_response.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from ..._models import BaseModel +from .floating_ip import FloatingIP +from ..shared.action_link import ActionLink + +__all__ = ["FloatingIPCreateResponse", "Links"] + + +class Links(BaseModel): + actions: Optional[List[ActionLink]] = None + + droplets: Optional[List[ActionLink]] = None + + +class FloatingIPCreateResponse(BaseModel): + floating_ip: Optional[FloatingIP] = None + + links: Optional[Links] = None diff --git a/src/gradientai/types/gpu_droplets/floating_ip_list_params.py b/src/gradientai/types/gpu_droplets/floating_ip_list_params.py new file mode 100644 index 00000000..2e054075 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ip_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["FloatingIPListParams"] + + +class FloatingIPListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/floating_ip_list_response.py b/src/gradientai/types/gpu_droplets/floating_ip_list_response.py new file mode 100644 index 00000000..734011d2 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ip_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from ..._models import BaseModel +from .floating_ip import FloatingIP +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["FloatingIPListResponse"] + + +class FloatingIPListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + floating_ips: Optional[List[FloatingIP]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/floating_ip_retrieve_response.py b/src/gradientai/types/gpu_droplets/floating_ip_retrieve_response.py new file mode 100644 index 00000000..b7ec77d4 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ip_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .floating_ip import FloatingIP + +__all__ = ["FloatingIPRetrieveResponse"] + + +class FloatingIPRetrieveResponse(BaseModel): + floating_ip: Optional[FloatingIP] = None diff --git a/src/gradientai/types/gpu_droplets/floating_ips/__init__.py b/src/gradientai/types/gpu_droplets/floating_ips/__init__.py new file mode 100644 index 00000000..a597418e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ips/__init__.py @@ -0,0 +1,8 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from .action_create_params import ActionCreateParams as ActionCreateParams +from .action_list_response import ActionListResponse as ActionListResponse +from .action_create_response import ActionCreateResponse as ActionCreateResponse +from .action_retrieve_response import ActionRetrieveResponse as ActionRetrieveResponse diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_create_params.py b/src/gradientai/types/gpu_droplets/floating_ips/action_create_params.py new file mode 100644 index 00000000..c84f5df7 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ips/action_create_params.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["ActionCreateParams", "FloatingIPActionUnassign", "FloatingIPActionAssign"] + + +class FloatingIPActionUnassign(TypedDict, total=False): + type: Required[Literal["assign", "unassign"]] + """The type of action to initiate for the floating IP.""" + + +class FloatingIPActionAssign(TypedDict, total=False): + droplet_id: Required[int] + """The ID of the Droplet that the floating IP will be assigned to.""" + + type: Required[Literal["assign", "unassign"]] + """The type of action to initiate for the floating IP.""" + + +ActionCreateParams: TypeAlias = Union[FloatingIPActionUnassign, FloatingIPActionAssign] diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_create_response.py b/src/gradientai/types/gpu_droplets/floating_ips/action_create_response.py new file mode 100644 index 00000000..90acd8c9 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ips/action_create_response.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ...shared import action +from ...._models import BaseModel + +__all__ = ["ActionCreateResponse", "Action"] + + +class Action(action.Action): + project_id: Optional[str] = None + """The UUID of the project to which the reserved IP currently belongs.""" + + +class ActionCreateResponse(BaseModel): + action: Optional[Action] = None diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_list_response.py b/src/gradientai/types/gpu_droplets/floating_ips/action_list_response.py new file mode 100644 index 00000000..2f4edac5 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ips/action_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ...._models import BaseModel +from ...shared.action import Action +from ...shared.page_links import PageLinks +from ...shared.meta_properties import MetaProperties + +__all__ = ["ActionListResponse"] + + +class ActionListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + actions: Optional[List[Action]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py b/src/gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py new file mode 100644 index 00000000..d94554be --- /dev/null +++ b/src/gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ...shared import action +from ...._models import BaseModel + +__all__ = ["ActionRetrieveResponse", "Action"] + + +class Action(action.Action): + project_id: Optional[str] = None + """The UUID of the project to which the reserved IP currently belongs.""" + + +class ActionRetrieveResponse(BaseModel): + action: Optional[Action] = None diff --git a/src/gradientai/types/gpu_droplets/forwarding_rule.py b/src/gradientai/types/gpu_droplets/forwarding_rule.py new file mode 100644 index 00000000..40a310ab --- /dev/null +++ b/src/gradientai/types/gpu_droplets/forwarding_rule.py @@ -0,0 +1,49 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ForwardingRule"] + + +class ForwardingRule(BaseModel): + entry_port: int + """ + An integer representing the port on which the load balancer instance will + listen. + """ + + entry_protocol: Literal["http", "https", "http2", "http3", "tcp", "udp"] + """The protocol used for traffic to the load balancer. + + The possible values are: `http`, `https`, `http2`, `http3`, `tcp`, or `udp`. If + you set the `entry_protocol` to `udp`, the `target_protocol` must be set to + `udp`. When using UDP, the load balancer requires that you set up a health check + with a port that uses TCP, HTTP, or HTTPS to work properly. + """ + + target_port: int + """ + An integer representing the port on the backend Droplets to which the load + balancer will send traffic. + """ + + target_protocol: Literal["http", "https", "http2", "tcp", "udp"] + """The protocol used for traffic from the load balancer to the backend Droplets. + + The possible values are: `http`, `https`, `http2`, `tcp`, or `udp`. If you set + the `target_protocol` to `udp`, the `entry_protocol` must be set to `udp`. 
When + using UDP, the load balancer requires that you set up a health check with a port + that uses TCP, HTTP, or HTTPS to work properly. + """ + + certificate_id: Optional[str] = None + """The ID of the TLS certificate used for SSL termination if enabled.""" + + tls_passthrough: Optional[bool] = None + """ + A boolean value indicating whether SSL encrypted traffic will be passed through + to the backend Droplets. + """ diff --git a/src/gradientai/types/gpu_droplets/forwarding_rule_param.py b/src/gradientai/types/gpu_droplets/forwarding_rule_param.py new file mode 100644 index 00000000..70285bf6 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/forwarding_rule_param.py @@ -0,0 +1,48 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ForwardingRuleParam"] + + +class ForwardingRuleParam(TypedDict, total=False): + entry_port: Required[int] + """ + An integer representing the port on which the load balancer instance will + listen. + """ + + entry_protocol: Required[Literal["http", "https", "http2", "http3", "tcp", "udp"]] + """The protocol used for traffic to the load balancer. + + The possible values are: `http`, `https`, `http2`, `http3`, `tcp`, or `udp`. If + you set the `entry_protocol` to `udp`, the `target_protocol` must be set to + `udp`. When using UDP, the load balancer requires that you set up a health check + with a port that uses TCP, HTTP, or HTTPS to work properly. + """ + + target_port: Required[int] + """ + An integer representing the port on the backend Droplets to which the load + balancer will send traffic. + """ + + target_protocol: Required[Literal["http", "https", "http2", "tcp", "udp"]] + """The protocol used for traffic from the load balancer to the backend Droplets. + + The possible values are: `http`, `https`, `http2`, `tcp`, or `udp`. 
If you set + the `target_protocol` to `udp`, the `entry_protocol` must be set to `udp`. When + using UDP, the load balancer requires that you set up a health check with a port + that uses TCP, HTTP, or HTTPS to work properly. + """ + + certificate_id: str + """The ID of the TLS certificate used for SSL termination if enabled.""" + + tls_passthrough: bool + """ + A boolean value indicating whether SSL encrypted traffic will be passed through + to the backend Droplets. + """ diff --git a/src/gradientai/types/gpu_droplets/glb_settings.py b/src/gradientai/types/gpu_droplets/glb_settings.py new file mode 100644 index 00000000..9aa790d8 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/glb_settings.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["GlbSettings", "Cdn"] + + +class Cdn(BaseModel): + is_enabled: Optional[bool] = None + """A boolean flag to enable CDN caching.""" + + +class GlbSettings(BaseModel): + cdn: Optional[Cdn] = None + """An object specifying CDN configurations for a Global load balancer.""" + + failover_threshold: Optional[int] = None + """ + An integer value as a percentage to indicate failure threshold to decide how the + regional priorities will take effect. A value of `50` would indicate that the + Global load balancer will choose a lower priority region to forward traffic to + once this failure threshold has been reached for the higher priority region. + """ + + region_priorities: Optional[Dict[str, int]] = None + """ + A map of region string to an integer priority value indicating preference for + which regional target a Global load balancer will forward traffic to. A lower + value indicates a higher priority. 
+ """ + + target_port: Optional[int] = None + """ + An integer representing the port on the target backends which the load balancer + will forward traffic to. + """ + + target_protocol: Optional[Literal["http", "https", "http2"]] = None + """ + The protocol used for forwarding traffic from the load balancer to the target + backends. The possible values are `http`, `https` and `http2`. + """ diff --git a/src/gradientai/types/gpu_droplets/glb_settings_param.py b/src/gradientai/types/gpu_droplets/glb_settings_param.py new file mode 100644 index 00000000..f1b25c8b --- /dev/null +++ b/src/gradientai/types/gpu_droplets/glb_settings_param.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict +from typing_extensions import Literal, TypedDict + +__all__ = ["GlbSettingsParam", "Cdn"] + + +class Cdn(TypedDict, total=False): + is_enabled: bool + """A boolean flag to enable CDN caching.""" + + +class GlbSettingsParam(TypedDict, total=False): + cdn: Cdn + """An object specifying CDN configurations for a Global load balancer.""" + + failover_threshold: int + """ + An integer value as a percentage to indicate failure threshold to decide how the + regional priorities will take effect. A value of `50` would indicate that the + Global load balancer will choose a lower priority region to forward traffic to + once this failure threshold has been reached for the higher priority region. + """ + + region_priorities: Dict[str, int] + """ + A map of region string to an integer priority value indicating preference for + which regional target a Global load balancer will forward traffic to. A lower + value indicates a higher priority. + """ + + target_port: int + """ + An integer representing the port on the target backends which the load balancer + will forward traffic to. 
+ """ + + target_protocol: Literal["http", "https", "http2"] + """ + The protocol used for forwarding traffic from the load balancer to the target + backends. The possible values are `http`, `https` and `http2`. + """ diff --git a/src/gradientai/types/gpu_droplets/health_check.py b/src/gradientai/types/gpu_droplets/health_check.py new file mode 100644 index 00000000..db44d84e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/health_check.py @@ -0,0 +1,49 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["HealthCheck"] + + +class HealthCheck(BaseModel): + check_interval_seconds: Optional[int] = None + """The number of seconds between between two consecutive health checks.""" + + healthy_threshold: Optional[int] = None + """ + The number of times a health check must pass for a backend Droplet to be marked + "healthy" and be re-added to the pool. + """ + + path: Optional[str] = None + """ + The path on the backend Droplets to which the load balancer instance will send a + request. + """ + + port: Optional[int] = None + """ + An integer representing the port on the backend Droplets on which the health + check will attempt a connection. + """ + + protocol: Optional[Literal["http", "https", "tcp"]] = None + """The protocol used for health checks sent to the backend Droplets. + + The possible values are `http`, `https`, or `tcp`. + """ + + response_timeout_seconds: Optional[int] = None + """ + The number of seconds the load balancer instance will wait for a response until + marking a health check as failed. + """ + + unhealthy_threshold: Optional[int] = None + """ + The number of times a health check must fail for a backend Droplet to be marked + "unhealthy" and be removed from the pool. 
+ """ diff --git a/src/gradientai/types/gpu_droplets/health_check_param.py b/src/gradientai/types/gpu_droplets/health_check_param.py new file mode 100644 index 00000000..e840f818 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/health_check_param.py @@ -0,0 +1,48 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["HealthCheckParam"] + + +class HealthCheckParam(TypedDict, total=False): + check_interval_seconds: int + """The number of seconds between between two consecutive health checks.""" + + healthy_threshold: int + """ + The number of times a health check must pass for a backend Droplet to be marked + "healthy" and be re-added to the pool. + """ + + path: str + """ + The path on the backend Droplets to which the load balancer instance will send a + request. + """ + + port: int + """ + An integer representing the port on the backend Droplets on which the health + check will attempt a connection. + """ + + protocol: Literal["http", "https", "tcp"] + """The protocol used for health checks sent to the backend Droplets. + + The possible values are `http`, `https`, or `tcp`. + """ + + response_timeout_seconds: int + """ + The number of seconds the load balancer instance will wait for a response until + marking a health check as failed. + """ + + unhealthy_threshold: int + """ + The number of times a health check must fail for a backend Droplet to be marked + "unhealthy" and be removed from the pool. + """ diff --git a/src/gradientai/types/gpu_droplets/image_create_params.py b/src/gradientai/types/gpu_droplets/image_create_params.py new file mode 100644 index 00000000..efbd684c --- /dev/null +++ b/src/gradientai/types/gpu_droplets/image_create_params.py @@ -0,0 +1,81 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Optional +from typing_extensions import Literal, TypedDict + +__all__ = ["ImageCreateParams"] + + +class ImageCreateParams(TypedDict, total=False): + description: str + """An optional free-form text field to describe an image.""" + + distribution: Literal[ + "Arch Linux", + "CentOS", + "CoreOS", + "Debian", + "Fedora", + "Fedora Atomic", + "FreeBSD", + "Gentoo", + "openSUSE", + "RancherOS", + "Rocky Linux", + "Ubuntu", + "Unknown", + ] + """The name of a custom image's distribution. + + Currently, the valid values are `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, + `Fedora`, `Fedora Atomic`, `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, + `Rocky Linux`, `Ubuntu`, and `Unknown`. Any other value will be accepted but + ignored, and `Unknown` will be used in its place. + """ + + name: str + """The display name that has been given to an image. + + This is what is shown in the control panel and is generally a descriptive title + for the image in question. + """ + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + tags: Optional[List[str]] + """A flat array of tag names as strings to be applied to the resource. + + Tag names may be for either existing or new tags. + + Requires `tag:create` scope. + """ + + url: str + """A URL from which the custom Linux virtual machine image may be retrieved. + + The image it points to must be in the raw, qcow2, vhdx, vdi, or vmdk format. It + may be compressed using gzip or bzip2 and must be smaller than 100 GB after + being decompressed. 
+ """ diff --git a/src/gradientai/types/gpu_droplets/image_create_response.py b/src/gradientai/types/gpu_droplets/image_create_response.py new file mode 100644 index 00000000..87ebbb01 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/image_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from ..shared.image import Image + +__all__ = ["ImageCreateResponse"] + + +class ImageCreateResponse(BaseModel): + image: Optional[Image] = None diff --git a/src/gradientai/types/gpu_droplets/image_list_params.py b/src/gradientai/types/gpu_droplets/image_list_params.py new file mode 100644 index 00000000..d8e90efa --- /dev/null +++ b/src/gradientai/types/gpu_droplets/image_list_params.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["ImageListParams"] + + +class ImageListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + private: bool + """Used to filter only user images.""" + + tag_name: str + """Used to filter images by a specific tag.""" + + type: Literal["application", "distribution"] + """ + Filters results based on image type which can be either `application` or + `distribution`. + """ diff --git a/src/gradientai/types/gpu_droplets/image_list_response.py b/src/gradientai/types/gpu_droplets/image_list_response.py new file mode 100644 index 00000000..d4bb5697 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/image_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from ..._models import BaseModel +from ..shared.image import Image +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["ImageListResponse"] + + +class ImageListResponse(BaseModel): + images: List[Image] + + meta: MetaProperties + """Information about the response itself.""" + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/image_retrieve_response.py b/src/gradientai/types/gpu_droplets/image_retrieve_response.py new file mode 100644 index 00000000..394dd384 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/image_retrieve_response.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from ..._models import BaseModel +from ..shared.image import Image + +__all__ = ["ImageRetrieveResponse"] + + +class ImageRetrieveResponse(BaseModel): + image: Image diff --git a/src/gradientai/types/gpu_droplets/image_update_params.py b/src/gradientai/types/gpu_droplets/image_update_params.py new file mode 100644 index 00000000..2ff851f8 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/image_update_params.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["ImageUpdateParams"] + + +class ImageUpdateParams(TypedDict, total=False): + description: str + """An optional free-form text field to describe an image.""" + + distribution: Literal[ + "Arch Linux", + "CentOS", + "CoreOS", + "Debian", + "Fedora", + "Fedora Atomic", + "FreeBSD", + "Gentoo", + "openSUSE", + "RancherOS", + "Rocky Linux", + "Ubuntu", + "Unknown", + ] + """The name of a custom image's distribution. 
+ + Currently, the valid values are `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, + `Fedora`, `Fedora Atomic`, `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, + `Rocky Linux`, `Ubuntu`, and `Unknown`. Any other value will be accepted but + ignored, and `Unknown` will be used in its place. + """ + + name: str + """The display name that has been given to an image. + + This is what is shown in the control panel and is generally a descriptive title + for the image in question. + """ diff --git a/src/gradientai/types/gpu_droplets/image_update_response.py b/src/gradientai/types/gpu_droplets/image_update_response.py new file mode 100644 index 00000000..3d07f5ac --- /dev/null +++ b/src/gradientai/types/gpu_droplets/image_update_response.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from ..._models import BaseModel +from ..shared.image import Image + +__all__ = ["ImageUpdateResponse"] + + +class ImageUpdateResponse(BaseModel): + image: Image diff --git a/src/gradientai/types/gpu_droplets/images/__init__.py b/src/gradientai/types/gpu_droplets/images/__init__.py new file mode 100644 index 00000000..7e78954c --- /dev/null +++ b/src/gradientai/types/gpu_droplets/images/__init__.py @@ -0,0 +1,6 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .action_create_params import ActionCreateParams as ActionCreateParams +from .action_list_response import ActionListResponse as ActionListResponse diff --git a/src/gradientai/types/gpu_droplets/images/action_create_params.py b/src/gradientai/types/gpu_droplets/images/action_create_params.py new file mode 100644 index 00000000..a1b57d47 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/images/action_create_params.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["ActionCreateParams", "ImageActionBase", "ImageActionTransfer"] + + +class ImageActionBase(TypedDict, total=False): + type: Required[Literal["convert", "transfer"]] + """The action to be taken on the image. Can be either `convert` or `transfer`.""" + + +class ImageActionTransfer(TypedDict, total=False): + region: Required[ + Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + type: Required[Literal["convert", "transfer"]] + """The action to be taken on the image. Can be either `convert` or `transfer`.""" + + +ActionCreateParams: TypeAlias = Union[ImageActionBase, ImageActionTransfer] diff --git a/src/gradientai/types/gpu_droplets/images/action_list_response.py b/src/gradientai/types/gpu_droplets/images/action_list_response.py new file mode 100644 index 00000000..2f4edac5 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/images/action_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from ...._models import BaseModel +from ...shared.action import Action +from ...shared.page_links import PageLinks +from ...shared.meta_properties import MetaProperties + +__all__ = ["ActionListResponse"] + + +class ActionListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + actions: Optional[List[Action]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/lb_firewall.py b/src/gradientai/types/gpu_droplets/lb_firewall.py new file mode 100644 index 00000000..aea1887c --- /dev/null +++ b/src/gradientai/types/gpu_droplets/lb_firewall.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel + +__all__ = ["LbFirewall"] + + +class LbFirewall(BaseModel): + allow: Optional[List[str]] = None + """ + the rules for allowing traffic to the load balancer (in the form 'ip:1.2.3.4' or + 'cidr:1.2.0.0/16') + """ + + deny: Optional[List[str]] = None + """ + the rules for denying traffic to the load balancer (in the form 'ip:1.2.3.4' or + 'cidr:1.2.0.0/16') + """ diff --git a/src/gradientai/types/gpu_droplets/lb_firewall_param.py b/src/gradientai/types/gpu_droplets/lb_firewall_param.py new file mode 100644 index 00000000..6f1dcf10 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/lb_firewall_param.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List +from typing_extensions import TypedDict + +__all__ = ["LbFirewallParam"] + + +class LbFirewallParam(TypedDict, total=False): + allow: List[str] + """ + the rules for allowing traffic to the load balancer (in the form 'ip:1.2.3.4' or + 'cidr:1.2.0.0/16') + """ + + deny: List[str] + """ + the rules for denying traffic to the load balancer (in the form 'ip:1.2.3.4' or + 'cidr:1.2.0.0/16') + """ diff --git a/src/gradientai/types/gpu_droplets/load_balancer.py b/src/gradientai/types/gpu_droplets/load_balancer.py new file mode 100644 index 00000000..d0e7597a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancer.py @@ -0,0 +1,185 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .domains import Domains +from ..._models import BaseModel +from .lb_firewall import LbFirewall +from .glb_settings import GlbSettings +from .health_check import HealthCheck +from ..shared.region import Region +from .forwarding_rule import ForwardingRule +from .sticky_sessions import StickySessions + +__all__ = ["LoadBalancer"] + + +class LoadBalancer(BaseModel): + forwarding_rules: List[ForwardingRule] + """An array of objects specifying the forwarding rules for a load balancer.""" + + id: Optional[str] = None + """A unique ID that can be used to identify and reference a load balancer.""" + + algorithm: Optional[Literal["round_robin", "least_connections"]] = None + """This field has been deprecated. + + You can no longer specify an algorithm for load balancers. + """ + + created_at: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format that represents when + the load balancer was created. 
+ """ + + disable_lets_encrypt_dns_records: Optional[bool] = None + """ + A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + """ + + domains: Optional[List[Domains]] = None + """ + An array of objects specifying the domain configurations for a Global load + balancer. + """ + + droplet_ids: Optional[List[int]] = None + """An array containing the IDs of the Droplets assigned to the load balancer.""" + + enable_backend_keepalive: Optional[bool] = None + """ + A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + """ + + enable_proxy_protocol: Optional[bool] = None + """A boolean value indicating whether PROXY Protocol is in use.""" + + firewall: Optional[LbFirewall] = None + """ + An object specifying allow and deny rules to control traffic to the load + balancer. + """ + + glb_settings: Optional[GlbSettings] = None + """An object specifying forwarding configurations for a Global load balancer.""" + + health_check: Optional[HealthCheck] = None + """An object specifying health check settings for the load balancer.""" + + http_idle_timeout_seconds: Optional[int] = None + """ + An integer value which configures the idle timeout for HTTP requests to the + target droplets. + """ + + ip: Optional[str] = None + """An attribute containing the public-facing IP address of the load balancer.""" + + ipv6: Optional[str] = None + """An attribute containing the public-facing IPv6 address of the load balancer.""" + + name: Optional[str] = None + """A human-readable name for a load balancer instance.""" + + network: Optional[Literal["EXTERNAL", "INTERNAL"]] = None + """A string indicating whether the load balancer should be external or internal. + + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. 
+ """ + + network_stack: Optional[Literal["IPV4", "DUALSTACK"]] = None + """ + A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + """ + + project_id: Optional[str] = None + """The ID of the project that the load balancer is associated with. + + If no ID is provided at creation, the load balancer associates with the user's + default project. If an invalid project ID is provided, the load balancer will + not be created. + """ + + redirect_http_to_https: Optional[bool] = None + """ + A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + """ + + region: Optional[Region] = None + """The region where the load balancer instance is located. + + When setting a region, the value should be the slug identifier for the region. + When you query a load balancer, an entire region object will be returned. + """ + + size: Optional[Literal["lb-small", "lb-medium", "lb-large"]] = None + """ + This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + """ + + size_unit: Optional[int] = None + """How many nodes the load balancer contains. + + Each additional node increases the load balancer's ability to manage more + connections. Load balancers can be scaled up or down, and you can change the + number of nodes after creation up to once per hour. This field is currently not + available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load + balancers that reside in these regions. 
+ """ + + status: Optional[Literal["new", "active", "errored"]] = None + """A status string indicating the current state of the load balancer. + + This can be `new`, `active`, or `errored`. + """ + + sticky_sessions: Optional[StickySessions] = None + """An object specifying sticky sessions settings for the load balancer.""" + + tag: Optional[str] = None + """ + The name of a Droplet tag corresponding to Droplets assigned to the load + balancer. + """ + + target_load_balancer_ids: Optional[List[str]] = None + """ + An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + """ + + tls_cipher_policy: Optional[Literal["DEFAULT", "STRONG"]] = None + """ + A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + """ + + type: Optional[Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"]] = None + """ + A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + """ + + vpc_uuid: Optional[str] = None + """A string specifying the UUID of the VPC to which the load balancer is assigned.""" diff --git a/src/gradientai/types/gpu_droplets/load_balancer_create_params.py b/src/gradientai/types/gpu_droplets/load_balancer_create_params.py new file mode 100644 index 00000000..a87d9148 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancer_create_params.py @@ -0,0 +1,335 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import List, Union, Iterable +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from .domains_param import DomainsParam +from .lb_firewall_param import LbFirewallParam +from .glb_settings_param import GlbSettingsParam +from .health_check_param import HealthCheckParam +from .forwarding_rule_param import ForwardingRuleParam +from .sticky_sessions_param import StickySessionsParam + +__all__ = ["LoadBalancerCreateParams", "AssignDropletsByID", "AssignDropletsByTag"] + + +class AssignDropletsByID(TypedDict, total=False): + forwarding_rules: Required[Iterable[ForwardingRuleParam]] + """An array of objects specifying the forwarding rules for a load balancer.""" + + algorithm: Literal["round_robin", "least_connections"] + """This field has been deprecated. + + You can no longer specify an algorithm for load balancers. + """ + + disable_lets_encrypt_dns_records: bool + """ + A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + """ + + domains: Iterable[DomainsParam] + """ + An array of objects specifying the domain configurations for a Global load + balancer. + """ + + droplet_ids: Iterable[int] + """An array containing the IDs of the Droplets assigned to the load balancer.""" + + enable_backend_keepalive: bool + """ + A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + """ + + enable_proxy_protocol: bool + """A boolean value indicating whether PROXY Protocol is in use.""" + + firewall: LbFirewallParam + """ + An object specifying allow and deny rules to control traffic to the load + balancer. 
+ """ + + glb_settings: GlbSettingsParam + """An object specifying forwarding configurations for a Global load balancer.""" + + health_check: HealthCheckParam + """An object specifying health check settings for the load balancer.""" + + http_idle_timeout_seconds: int + """ + An integer value which configures the idle timeout for HTTP requests to the + target droplets. + """ + + name: str + """A human-readable name for a load balancer instance.""" + + network: Literal["EXTERNAL", "INTERNAL"] + """A string indicating whether the load balancer should be external or internal. + + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + """ + + network_stack: Literal["IPV4", "DUALSTACK"] + """ + A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + """ + + project_id: str + """The ID of the project that the load balancer is associated with. + + If no ID is provided at creation, the load balancer associates with the user's + default project. If an invalid project ID is provided, the load balancer will + not be created. + """ + + redirect_http_to_https: bool + """ + A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + """ + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + size: Literal["lb-small", "lb-medium", "lb-large"] + """ + This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. 
+ + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + """ + + size_unit: int + """How many nodes the load balancer contains. + + Each additional node increases the load balancer's ability to manage more + connections. Load balancers can be scaled up or down, and you can change the + number of nodes after creation up to once per hour. This field is currently not + available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load + balancers that reside in these regions. + """ + + sticky_sessions: StickySessionsParam + """An object specifying sticky sessions settings for the load balancer.""" + + target_load_balancer_ids: List[str] + """ + An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + """ + + tls_cipher_policy: Literal["DEFAULT", "STRONG"] + """ + A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + """ + + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] + """ + A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + """ + + vpc_uuid: str + """A string specifying the UUID of the VPC to which the load balancer is assigned.""" + + +class AssignDropletsByTag(TypedDict, total=False): + forwarding_rules: Required[Iterable[ForwardingRuleParam]] + """An array of objects specifying the forwarding rules for a load balancer.""" + + algorithm: Literal["round_robin", "least_connections"] + """This field has been deprecated. + + You can no longer specify an algorithm for load balancers. 
+ """ + + disable_lets_encrypt_dns_records: bool + """ + A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + """ + + domains: Iterable[DomainsParam] + """ + An array of objects specifying the domain configurations for a Global load + balancer. + """ + + enable_backend_keepalive: bool + """ + A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + """ + + enable_proxy_protocol: bool + """A boolean value indicating whether PROXY Protocol is in use.""" + + firewall: LbFirewallParam + """ + An object specifying allow and deny rules to control traffic to the load + balancer. + """ + + glb_settings: GlbSettingsParam + """An object specifying forwarding configurations for a Global load balancer.""" + + health_check: HealthCheckParam + """An object specifying health check settings for the load balancer.""" + + http_idle_timeout_seconds: int + """ + An integer value which configures the idle timeout for HTTP requests to the + target droplets. + """ + + name: str + """A human-readable name for a load balancer instance.""" + + network: Literal["EXTERNAL", "INTERNAL"] + """A string indicating whether the load balancer should be external or internal. + + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + """ + + network_stack: Literal["IPV4", "DUALSTACK"] + """ + A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + """ + + project_id: str + """The ID of the project that the load balancer is associated with. + + If no ID is provided at creation, the load balancer associates with the user's + default project. If an invalid project ID is provided, the load balancer will + not be created. 
+ """ + + redirect_http_to_https: bool + """ + A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + """ + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + size: Literal["lb-small", "lb-medium", "lb-large"] + """ + This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + """ + + size_unit: int + """How many nodes the load balancer contains. + + Each additional node increases the load balancer's ability to manage more + connections. Load balancers can be scaled up or down, and you can change the + number of nodes after creation up to once per hour. This field is currently not + available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load + balancers that reside in these regions. + """ + + sticky_sessions: StickySessionsParam + """An object specifying sticky sessions settings for the load balancer.""" + + tag: str + """ + The name of a Droplet tag corresponding to Droplets assigned to the load + balancer. + """ + + target_load_balancer_ids: List[str] + """ + An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + """ + + tls_cipher_policy: Literal["DEFAULT", "STRONG"] + """ + A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. 
The default value is + `DEFAULT`. + """ + + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] + """ + A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + """ + + vpc_uuid: str + """A string specifying the UUID of the VPC to which the load balancer is assigned.""" + + +LoadBalancerCreateParams: TypeAlias = Union[AssignDropletsByID, AssignDropletsByTag] diff --git a/src/gradientai/types/gpu_droplets/load_balancer_create_response.py b/src/gradientai/types/gpu_droplets/load_balancer_create_response.py new file mode 100644 index 00000000..ed4f2211 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancer_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .load_balancer import LoadBalancer + +__all__ = ["LoadBalancerCreateResponse"] + + +class LoadBalancerCreateResponse(BaseModel): + load_balancer: Optional[LoadBalancer] = None diff --git a/src/gradientai/types/gpu_droplets/load_balancer_list_params.py b/src/gradientai/types/gpu_droplets/load_balancer_list_params.py new file mode 100644 index 00000000..d0daff3f --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancer_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["LoadBalancerListParams"] + + +class LoadBalancerListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/load_balancer_list_response.py b/src/gradientai/types/gpu_droplets/load_balancer_list_response.py new file mode 100644 index 00000000..d5d0b4ac --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancer_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from .load_balancer import LoadBalancer +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["LoadBalancerListResponse"] + + +class LoadBalancerListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + links: Optional[PageLinks] = None + + load_balancers: Optional[List[LoadBalancer]] = None diff --git a/src/gradientai/types/gpu_droplets/load_balancer_retrieve_response.py b/src/gradientai/types/gpu_droplets/load_balancer_retrieve_response.py new file mode 100644 index 00000000..779e9693 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancer_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel +from .load_balancer import LoadBalancer + +__all__ = ["LoadBalancerRetrieveResponse"] + + +class LoadBalancerRetrieveResponse(BaseModel): + load_balancer: Optional[LoadBalancer] = None diff --git a/src/gradientai/types/gpu_droplets/load_balancer_update_params.py b/src/gradientai/types/gpu_droplets/load_balancer_update_params.py new file mode 100644 index 00000000..9a1906cb --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancer_update_params.py @@ -0,0 +1,335 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Iterable +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from .domains_param import DomainsParam +from .lb_firewall_param import LbFirewallParam +from .glb_settings_param import GlbSettingsParam +from .health_check_param import HealthCheckParam +from .forwarding_rule_param import ForwardingRuleParam +from .sticky_sessions_param import StickySessionsParam + +__all__ = ["LoadBalancerUpdateParams", "AssignDropletsByID", "AssignDropletsByTag"] + + +class AssignDropletsByID(TypedDict, total=False): + forwarding_rules: Required[Iterable[ForwardingRuleParam]] + """An array of objects specifying the forwarding rules for a load balancer.""" + + algorithm: Literal["round_robin", "least_connections"] + """This field has been deprecated. + + You can no longer specify an algorithm for load balancers. + """ + + disable_lets_encrypt_dns_records: bool + """ + A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + """ + + domains: Iterable[DomainsParam] + """ + An array of objects specifying the domain configurations for a Global load + balancer. 
+ """ + + droplet_ids: Iterable[int] + """An array containing the IDs of the Droplets assigned to the load balancer.""" + + enable_backend_keepalive: bool + """ + A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + """ + + enable_proxy_protocol: bool + """A boolean value indicating whether PROXY Protocol is in use.""" + + firewall: LbFirewallParam + """ + An object specifying allow and deny rules to control traffic to the load + balancer. + """ + + glb_settings: GlbSettingsParam + """An object specifying forwarding configurations for a Global load balancer.""" + + health_check: HealthCheckParam + """An object specifying health check settings for the load balancer.""" + + http_idle_timeout_seconds: int + """ + An integer value which configures the idle timeout for HTTP requests to the + target droplets. + """ + + name: str + """A human-readable name for a load balancer instance.""" + + network: Literal["EXTERNAL", "INTERNAL"] + """A string indicating whether the load balancer should be external or internal. + + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + """ + + network_stack: Literal["IPV4", "DUALSTACK"] + """ + A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + """ + + project_id: str + """The ID of the project that the load balancer is associated with. + + If no ID is provided at creation, the load balancer associates with the user's + default project. If an invalid project ID is provided, the load balancer will + not be created. + """ + + redirect_http_to_https: bool + """ + A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. 
+ """ + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + size: Literal["lb-small", "lb-medium", "lb-large"] + """ + This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + """ + + size_unit: int + """How many nodes the load balancer contains. + + Each additional node increases the load balancer's ability to manage more + connections. Load balancers can be scaled up or down, and you can change the + number of nodes after creation up to once per hour. This field is currently not + available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load + balancers that reside in these regions. + """ + + sticky_sessions: StickySessionsParam + """An object specifying sticky sessions settings for the load balancer.""" + + target_load_balancer_ids: List[str] + """ + An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + """ + + tls_cipher_policy: Literal["DEFAULT", "STRONG"] + """ + A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. 
+ """ + + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] + """ + A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + """ + + vpc_uuid: str + """A string specifying the UUID of the VPC to which the load balancer is assigned.""" + + +class AssignDropletsByTag(TypedDict, total=False): + forwarding_rules: Required[Iterable[ForwardingRuleParam]] + """An array of objects specifying the forwarding rules for a load balancer.""" + + algorithm: Literal["round_robin", "least_connections"] + """This field has been deprecated. + + You can no longer specify an algorithm for load balancers. + """ + + disable_lets_encrypt_dns_records: bool + """ + A boolean value indicating whether to disable automatic DNS record creation for + Let's Encrypt certificates that are added to the load balancer. + """ + + domains: Iterable[DomainsParam] + """ + An array of objects specifying the domain configurations for a Global load + balancer. + """ + + enable_backend_keepalive: bool + """ + A boolean value indicating whether HTTP keepalive connections are maintained to + target Droplets. + """ + + enable_proxy_protocol: bool + """A boolean value indicating whether PROXY Protocol is in use.""" + + firewall: LbFirewallParam + """ + An object specifying allow and deny rules to control traffic to the load + balancer. + """ + + glb_settings: GlbSettingsParam + """An object specifying forwarding configurations for a Global load balancer.""" + + health_check: HealthCheckParam + """An object specifying health check settings for the load balancer.""" + + http_idle_timeout_seconds: int + """ + An integer value which configures the idle timeout for HTTP requests to the + target droplets. 
+ """ + + name: str + """A human-readable name for a load balancer instance.""" + + network: Literal["EXTERNAL", "INTERNAL"] + """A string indicating whether the load balancer should be external or internal. + + Internal load balancers have no public IPs and are only accessible to resources + on the same VPC network. This property cannot be updated after creating the load + balancer. + """ + + network_stack: Literal["IPV4", "DUALSTACK"] + """ + A string indicating whether the load balancer will support IPv4 or both IPv4 and + IPv6 networking. This property cannot be updated after creating the load + balancer. + """ + + project_id: str + """The ID of the project that the load balancer is associated with. + + If no ID is provided at creation, the load balancer associates with the user's + default project. If an invalid project ID is provided, the load balancer will + not be created. + """ + + redirect_http_to_https: bool + """ + A boolean value indicating whether HTTP requests to the load balancer on port 80 + will be redirected to HTTPS on port 443. + """ + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + size: Literal["lb-small", "lb-medium", "lb-large"] + """ + This field has been replaced by the `size_unit` field for all regions except in + AMS2, NYC2, and SFO1. Each available load balancer size now equates to the load + balancer having a set number of nodes. + + - `lb-small` = 1 node + - `lb-medium` = 3 nodes + - `lb-large` = 6 nodes + + You can resize load balancers after creation up to once per hour. You cannot + resize a load balancer within the first hour of its creation. + """ + + size_unit: int + """How many nodes the load balancer contains. + + Each additional node increases the load balancer's ability to manage more + connections. 
Load balancers can be scaled up or down, and you can change the + number of nodes after creation up to once per hour. This field is currently not + available in the AMS2, NYC2, or SFO1 regions. Use the `size` field to scale load + balancers that reside in these regions. + """ + + sticky_sessions: StickySessionsParam + """An object specifying sticky sessions settings for the load balancer.""" + + tag: str + """ + The name of a Droplet tag corresponding to Droplets assigned to the load + balancer. + """ + + target_load_balancer_ids: List[str] + """ + An array containing the UUIDs of the Regional load balancers to be used as + target backends for a Global load balancer. + """ + + tls_cipher_policy: Literal["DEFAULT", "STRONG"] + """ + A string indicating the policy for the TLS cipher suites used by the load + balancer. The possible values are `DEFAULT` or `STRONG`. The default value is + `DEFAULT`. + """ + + type: Literal["REGIONAL", "REGIONAL_NETWORK", "GLOBAL"] + """ + A string indicating whether the load balancer should be a standard regional HTTP + load balancer, a regional network load balancer that routes traffic at the + TCP/UDP transport layer, or a global load balancer. + """ + + vpc_uuid: str + """A string specifying the UUID of the VPC to which the load balancer is assigned.""" + + +LoadBalancerUpdateParams: TypeAlias = Union[AssignDropletsByID, AssignDropletsByTag] diff --git a/src/gradientai/types/gpu_droplets/load_balancer_update_response.py b/src/gradientai/types/gpu_droplets/load_balancer_update_response.py new file mode 100644 index 00000000..2b24b376 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancer_update_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel +from .load_balancer import LoadBalancer + +__all__ = ["LoadBalancerUpdateResponse"] + + +class LoadBalancerUpdateResponse(BaseModel): + load_balancer: Optional[LoadBalancer] = None diff --git a/src/gradientai/types/gpu_droplets/load_balancers/__init__.py b/src/gradientai/types/gpu_droplets/load_balancers/__init__.py new file mode 100644 index 00000000..806a71be --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancers/__init__.py @@ -0,0 +1,8 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .droplet_add_params import DropletAddParams as DropletAddParams +from .droplet_remove_params import DropletRemoveParams as DropletRemoveParams +from .forwarding_rule_add_params import ForwardingRuleAddParams as ForwardingRuleAddParams +from .forwarding_rule_remove_params import ForwardingRuleRemoveParams as ForwardingRuleRemoveParams diff --git a/src/gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py b/src/gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py new file mode 100644 index 00000000..ee403f5f --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Required, TypedDict + +__all__ = ["DropletAddParams"] + + +class DropletAddParams(TypedDict, total=False): + droplet_ids: Required[Iterable[int]] + """An array containing the IDs of the Droplets assigned to the load balancer.""" diff --git a/src/gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py b/src/gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py new file mode 100644 index 00000000..d48795e9 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Required, TypedDict + +__all__ = ["DropletRemoveParams"] + + +class DropletRemoveParams(TypedDict, total=False): + droplet_ids: Required[Iterable[int]] + """An array containing the IDs of the Droplets assigned to the load balancer.""" diff --git a/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py b/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py new file mode 100644 index 00000000..2cc6a2df --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Required, TypedDict + +from ..forwarding_rule_param import ForwardingRuleParam + +__all__ = ["ForwardingRuleAddParams"] + + +class ForwardingRuleAddParams(TypedDict, total=False): + forwarding_rules: Required[Iterable[ForwardingRuleParam]] diff --git a/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py b/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py new file mode 100644 index 00000000..e5209543 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable +from typing_extensions import Required, TypedDict + +from ..forwarding_rule_param import ForwardingRuleParam + +__all__ = ["ForwardingRuleRemoveParams"] + + +class ForwardingRuleRemoveParams(TypedDict, total=False): + forwarding_rules: Required[Iterable[ForwardingRuleParam]] diff --git a/src/gradientai/types/gpu_droplets/size_list_params.py b/src/gradientai/types/gpu_droplets/size_list_params.py new file mode 100644 index 00000000..5df85a9c --- /dev/null +++ b/src/gradientai/types/gpu_droplets/size_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["SizeListParams"] + + +class SizeListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/size_list_response.py b/src/gradientai/types/gpu_droplets/size_list_response.py new file mode 100644 index 00000000..c0c600b4 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/size_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from ..shared.size import Size +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["SizeListResponse"] + + +class SizeListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + sizes: List[Size] + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/snapshot_list_params.py b/src/gradientai/types/gpu_droplets/snapshot_list_params.py new file mode 100644 index 00000000..6d1b6f5b --- /dev/null +++ b/src/gradientai/types/gpu_droplets/snapshot_list_params.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["SnapshotListParams"] + + +class SnapshotListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + resource_type: Literal["droplet", "volume"] + """Used to filter snapshots by a resource type.""" diff --git a/src/gradientai/types/gpu_droplets/snapshot_list_response.py b/src/gradientai/types/gpu_droplets/snapshot_list_response.py new file mode 100644 index 00000000..29b6ec3b --- /dev/null +++ b/src/gradientai/types/gpu_droplets/snapshot_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from ..shared.snapshots import Snapshots +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["SnapshotListResponse"] + + +class SnapshotListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + links: Optional[PageLinks] = None + + snapshots: Optional[List[Snapshots]] = None diff --git a/src/gradientai/types/gpu_droplets/snapshot_retrieve_response.py b/src/gradientai/types/gpu_droplets/snapshot_retrieve_response.py new file mode 100644 index 00000000..38d84c7a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/snapshot_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel +from ..shared.snapshots import Snapshots + +__all__ = ["SnapshotRetrieveResponse"] + + +class SnapshotRetrieveResponse(BaseModel): + snapshot: Optional[Snapshots] = None diff --git a/src/gradientai/types/gpu_droplets/sticky_sessions.py b/src/gradientai/types/gpu_droplets/sticky_sessions.py new file mode 100644 index 00000000..78debc07 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/sticky_sessions.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["StickySessions"] + + +class StickySessions(BaseModel): + cookie_name: Optional[str] = None + """The name of the cookie sent to the client. + + This attribute is only returned when using `cookies` for the sticky sessions + type. + """ + + cookie_ttl_seconds: Optional[int] = None + """The number of seconds until the cookie set by the load balancer expires. + + This attribute is only returned when using `cookies` for the sticky sessions + type. + """ + + type: Optional[Literal["cookies", "none"]] = None + """ + An attribute indicating how and if requests from a client will be persistently + served by the same backend Droplet. The possible values are `cookies` or `none`. + """ diff --git a/src/gradientai/types/gpu_droplets/sticky_sessions_param.py b/src/gradientai/types/gpu_droplets/sticky_sessions_param.py new file mode 100644 index 00000000..acea4a4a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/sticky_sessions_param.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["StickySessionsParam"] + + +class StickySessionsParam(TypedDict, total=False): + cookie_name: str + """The name of the cookie sent to the client. 
+ + This attribute is only returned when using `cookies` for the sticky sessions + type. + """ + + cookie_ttl_seconds: int + """The number of seconds until the cookie set by the load balancer expires. + + This attribute is only returned when using `cookies` for the sticky sessions + type. + """ + + type: Literal["cookies", "none"] + """ + An attribute indicating how and if requests from a client will be persistently + served by the same backend Droplet. The possible values are `cookies` or `none`. + """ diff --git a/src/gradientai/types/gpu_droplets/volume_create_params.py b/src/gradientai/types/gpu_droplets/volume_create_params.py new file mode 100644 index 00000000..fc889801 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volume_create_params.py @@ -0,0 +1,153 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["VolumeCreateParams", "VolumesExt4", "VolumesXfs"] + + +class VolumesExt4(TypedDict, total=False): + name: Required[str] + """A human-readable name for the block storage volume. + + Must be lowercase and be composed only of numbers, letters and "-", up to a + limit of 64 characters. The name must begin with a letter. + """ + + region: Required[ + Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + size_gigabytes: Required[int] + """The size of the block storage volume in GiB (1024^3). + + This field does not apply when creating a volume from a snapshot. + """ + + description: str + """An optional free-form text field to describe a block storage volume.""" + + filesystem_label: str + """The label applied to the filesystem. 
+ + Labels for ext4 type filesystems may contain 16 characters while labels for xfs + type filesystems are limited to 12 characters. May only be used in conjunction + with filesystem_type. + """ + + filesystem_type: str + """The name of the filesystem type to be used on the volume. + + When provided, the volume will automatically be formatted to the specified + filesystem type. Currently, the available options are `ext4` and `xfs`. + Pre-formatted volumes are automatically mounted when attached to Ubuntu, Debian, + Fedora, Fedora Atomic, and CentOS Droplets created on or after April 26, 2018. + Attaching pre-formatted volumes to other Droplets is not recommended. + """ + + snapshot_id: str + """The unique identifier for the volume snapshot from which to create the volume.""" + + tags: Optional[List[str]] + """A flat array of tag names as strings to be applied to the resource. + + Tag names may be for either existing or new tags. + + Requires `tag:create` scope. + """ + + +class VolumesXfs(TypedDict, total=False): + name: Required[str] + """A human-readable name for the block storage volume. + + Must be lowercase and be composed only of numbers, letters and "-", up to a + limit of 64 characters. The name must begin with a letter. + """ + + region: Required[ + Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + size_gigabytes: Required[int] + """The size of the block storage volume in GiB (1024^3). + + This field does not apply when creating a volume from a snapshot. + """ + + description: str + """An optional free-form text field to describe a block storage volume.""" + + filesystem_label: str + """The label applied to the filesystem. 
+ + Labels for ext4 type filesystems may contain 16 characters while labels for xfs + type filesystems are limited to 12 characters. May only be used in conjunction + with filesystem_type. + """ + + filesystem_type: str + """The name of the filesystem type to be used on the volume. + + When provided, the volume will automatically be formatted to the specified + filesystem type. Currently, the available options are `ext4` and `xfs`. + Pre-formatted volumes are automatically mounted when attached to Ubuntu, Debian, + Fedora, Fedora Atomic, and CentOS Droplets created on or after April 26, 2018. + Attaching pre-formatted volumes to other Droplets is not recommended. + """ + + snapshot_id: str + """The unique identifier for the volume snapshot from which to create the volume.""" + + tags: Optional[List[str]] + """A flat array of tag names as strings to be applied to the resource. + + Tag names may be for either existing or new tags. + + Requires `tag:create` scope. + """ + + +VolumeCreateParams: TypeAlias = Union[VolumesExt4, VolumesXfs] diff --git a/src/gradientai/types/gpu_droplets/volume_create_response.py b/src/gradientai/types/gpu_droplets/volume_create_response.py new file mode 100644 index 00000000..1bca9965 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volume_create_response.py @@ -0,0 +1,65 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from ..shared.region import Region + +__all__ = ["VolumeCreateResponse", "Volume"] + + +class Volume(BaseModel): + id: Optional[str] = None + """The unique identifier for the block storage volume.""" + + created_at: Optional[str] = None + """ + A time value given in ISO8601 combined date and time format that represents when + the block storage volume was created. 
+ """ + + description: Optional[str] = None + """An optional free-form text field to describe a block storage volume.""" + + droplet_ids: Optional[List[int]] = None + """An array containing the IDs of the Droplets the volume is attached to. + + Note that at this time, a volume can only be attached to a single Droplet. + """ + + filesystem_label: Optional[str] = None + """The label currently applied to the filesystem.""" + + filesystem_type: Optional[str] = None + """The type of filesystem currently in-use on the volume.""" + + name: Optional[str] = None + """A human-readable name for the block storage volume. + + Must be lowercase and be composed only of numbers, letters and "-", up to a + limit of 64 characters. The name must begin with a letter. + """ + + region: Optional[Region] = None + """The region that the block storage volume is located in. + + When setting a region, the value should be the slug identifier for the region. + When you query a block storage volume, the entire region object will be + returned. + """ + + size_gigabytes: Optional[int] = None + """The size of the block storage volume in GiB (1024^3). + + This field does not apply when creating a volume from a snapshot. + """ + + tags: Optional[List[str]] = None + """A flat array of tag names as strings applied to the resource. + + Requires `tag:read` scope. + """ + + +class VolumeCreateResponse(BaseModel): + volume: Optional[Volume] = None diff --git a/src/gradientai/types/gpu_droplets/volume_delete_by_name_params.py b/src/gradientai/types/gpu_droplets/volume_delete_by_name_params.py new file mode 100644 index 00000000..26d173f0 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volume_delete_by_name_params.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["VolumeDeleteByNameParams"] + + +class VolumeDeleteByNameParams(TypedDict, total=False): + name: str + """The block storage volume's name.""" + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """The slug identifier for the region where the resource is available.""" diff --git a/src/gradientai/types/gpu_droplets/volume_list_params.py b/src/gradientai/types/gpu_droplets/volume_list_params.py new file mode 100644 index 00000000..b4549651 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volume_list_params.py @@ -0,0 +1,37 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, TypedDict + +__all__ = ["VolumeListParams"] + + +class VolumeListParams(TypedDict, total=False): + name: str + """The block storage volume's name.""" + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """The slug identifier for the region where the resource is available.""" diff --git a/src/gradientai/types/gpu_droplets/volume_list_response.py b/src/gradientai/types/gpu_droplets/volume_list_response.py new file mode 100644 index 00000000..69ff421a --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volume_list_response.py @@ -0,0 +1,73 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from ..._models import BaseModel +from ..shared.region import Region +from ..shared.page_links import PageLinks +from ..shared.meta_properties import MetaProperties + +__all__ = ["VolumeListResponse", "Volume"] + + +class Volume(BaseModel): + id: Optional[str] = None + """The unique identifier for the block storage volume.""" + + created_at: Optional[str] = None + """ + A time value given in ISO8601 combined date and time format that represents when + the block storage volume was created. + """ + + description: Optional[str] = None + """An optional free-form text field to describe a block storage volume.""" + + droplet_ids: Optional[List[int]] = None + """An array containing the IDs of the Droplets the volume is attached to. + + Note that at this time, a volume can only be attached to a single Droplet. + """ + + filesystem_label: Optional[str] = None + """The label currently applied to the filesystem.""" + + filesystem_type: Optional[str] = None + """The type of filesystem currently in-use on the volume.""" + + name: Optional[str] = None + """A human-readable name for the block storage volume. + + Must be lowercase and be composed only of numbers, letters and "-", up to a + limit of 64 characters. The name must begin with a letter. + """ + + region: Optional[Region] = None + """The region that the block storage volume is located in. + + When setting a region, the value should be the slug identifier for the region. + When you query a block storage volume, the entire region object will be + returned. + """ + + size_gigabytes: Optional[int] = None + """The size of the block storage volume in GiB (1024^3). + + This field does not apply when creating a volume from a snapshot. + """ + + tags: Optional[List[str]] = None + """A flat array of tag names as strings applied to the resource. + + Requires `tag:read` scope. 
+ """ + + +class VolumeListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + volumes: List[Volume] + """Array of volumes.""" + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/volume_retrieve_response.py b/src/gradientai/types/gpu_droplets/volume_retrieve_response.py new file mode 100644 index 00000000..3efe8de7 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volume_retrieve_response.py @@ -0,0 +1,65 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel +from ..shared.region import Region + +__all__ = ["VolumeRetrieveResponse", "Volume"] + + +class Volume(BaseModel): + id: Optional[str] = None + """The unique identifier for the block storage volume.""" + + created_at: Optional[str] = None + """ + A time value given in ISO8601 combined date and time format that represents when + the block storage volume was created. + """ + + description: Optional[str] = None + """An optional free-form text field to describe a block storage volume.""" + + droplet_ids: Optional[List[int]] = None + """An array containing the IDs of the Droplets the volume is attached to. + + Note that at this time, a volume can only be attached to a single Droplet. + """ + + filesystem_label: Optional[str] = None + """The label currently applied to the filesystem.""" + + filesystem_type: Optional[str] = None + """The type of filesystem currently in-use on the volume.""" + + name: Optional[str] = None + """A human-readable name for the block storage volume. + + Must be lowercase and be composed only of numbers, letters and "-", up to a + limit of 64 characters. The name must begin with a letter. + """ + + region: Optional[Region] = None + """The region that the block storage volume is located in. + + When setting a region, the value should be the slug identifier for the region. 
+ When you query a block storage volume, the entire region object will be + returned. + """ + + size_gigabytes: Optional[int] = None + """The size of the block storage volume in GiB (1024^3). + + This field does not apply when creating a volume from a snapshot. + """ + + tags: Optional[List[str]] = None + """A flat array of tag names as strings applied to the resource. + + Requires `tag:read` scope. + """ + + +class VolumeRetrieveResponse(BaseModel): + volume: Optional[Volume] = None diff --git a/src/gradientai/types/gpu_droplets/volumes/__init__.py b/src/gradientai/types/gpu_droplets/volumes/__init__.py new file mode 100644 index 00000000..68d3d1e9 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/__init__.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .volume_action import VolumeAction as VolumeAction +from .action_list_params import ActionListParams as ActionListParams +from .action_list_response import ActionListResponse as ActionListResponse +from .snapshot_list_params import SnapshotListParams as SnapshotListParams +from .action_retrieve_params import ActionRetrieveParams as ActionRetrieveParams +from .snapshot_create_params import SnapshotCreateParams as SnapshotCreateParams +from .snapshot_list_response import SnapshotListResponse as SnapshotListResponse +from .action_retrieve_response import ActionRetrieveResponse as ActionRetrieveResponse +from .snapshot_create_response import SnapshotCreateResponse as SnapshotCreateResponse +from .snapshot_retrieve_response import SnapshotRetrieveResponse as SnapshotRetrieveResponse +from .action_initiate_by_id_params import ActionInitiateByIDParams as ActionInitiateByIDParams +from .action_initiate_by_id_response import ActionInitiateByIDResponse as ActionInitiateByIDResponse +from .action_initiate_by_name_params import ActionInitiateByNameParams as ActionInitiateByNameParams +from 
.action_initiate_by_name_response import ActionInitiateByNameResponse as ActionInitiateByNameResponse diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py new file mode 100644 index 00000000..6d41d463 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py @@ -0,0 +1,133 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["ActionInitiateByIDParams", "VolumeActionPostAttach", "VolumeActionPostDetach", "VolumeActionPostResize"] + + +class VolumeActionPostAttach(TypedDict, total=False): + droplet_id: Required[int] + """ + The unique identifier for the Droplet the volume will be attached or detached + from. + """ + + type: Required[Literal["attach", "detach", "resize"]] + """The volume action to initiate.""" + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + tags: Optional[List[str]] + """A flat array of tag names as strings to be applied to the resource. + + Tag names may be for either existing or new tags. + + Requires `tag:create` scope. + """ + + +class VolumeActionPostDetach(TypedDict, total=False): + droplet_id: Required[int] + """ + The unique identifier for the Droplet the volume will be attached or detached + from. 
+ """ + + type: Required[Literal["attach", "detach", "resize"]] + """The volume action to initiate.""" + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + +class VolumeActionPostResize(TypedDict, total=False): + size_gigabytes: Required[int] + """The new size of the block storage volume in GiB (1024^3).""" + + type: Required[Literal["attach", "detach", "resize"]] + """The volume action to initiate.""" + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + +ActionInitiateByIDParams: TypeAlias = Union[VolumeActionPostAttach, VolumeActionPostDetach, VolumeActionPostResize] diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py new file mode 100644 index 00000000..d8460f22 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ...._models import BaseModel +from .volume_action import VolumeAction + +__all__ = ["ActionInitiateByIDResponse"] + + +class ActionInitiateByIDResponse(BaseModel): + action: Optional[VolumeAction] = None diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py new file mode 100644 index 00000000..d1a7d084 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py @@ -0,0 +1,97 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Union, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["ActionInitiateByNameParams", "VolumeActionPostAttach", "VolumeActionPostDetach"] + + +class VolumeActionPostAttach(TypedDict, total=False): + droplet_id: Required[int] + """ + The unique identifier for the Droplet the volume will be attached or detached + from. + """ + + type: Required[Literal["attach", "detach", "resize"]] + """The volume action to initiate.""" + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + tags: Optional[List[str]] + """A flat array of tag names as strings to be applied to the resource. + + Tag names may be for either existing or new tags. + + Requires `tag:create` scope. + """ + + +class VolumeActionPostDetach(TypedDict, total=False): + droplet_id: Required[int] + """ + The unique identifier for the Droplet the volume will be attached or detached + from. 
+ """ + + type: Required[Literal["attach", "detach", "resize"]] + """The volume action to initiate.""" + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" + + region: Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + """ + The slug identifier for the region where the resource will initially be + available. + """ + + +ActionInitiateByNameParams: TypeAlias = Union[VolumeActionPostAttach, VolumeActionPostDetach] diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py new file mode 100644 index 00000000..9a935bdf --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from .volume_action import VolumeAction + +__all__ = ["ActionInitiateByNameResponse"] + + +class ActionInitiateByNameResponse(BaseModel): + action: Optional[VolumeAction] = None diff --git a/src/gradientai/types/gpu_droplets/volumes/action_list_params.py b/src/gradientai/types/gpu_droplets/volumes/action_list_params.py new file mode 100644 index 00000000..dd873288 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/action_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["ActionListParams"] + + +class ActionListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/volumes/action_list_response.py b/src/gradientai/types/gpu_droplets/volumes/action_list_response.py new file mode 100644 index 00000000..35964633 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/action_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ...._models import BaseModel +from .volume_action import VolumeAction +from ...shared.page_links import PageLinks +from ...shared.meta_properties import MetaProperties + +__all__ = ["ActionListResponse"] + + +class ActionListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + actions: Optional[List[VolumeAction]] = None + + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/gpu_droplets/volumes/action_retrieve_params.py b/src/gradientai/types/gpu_droplets/volumes/action_retrieve_params.py new file mode 100644 index 00000000..93ab443f --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/action_retrieve_params.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["ActionRetrieveParams"] + + +class ActionRetrieveParams(TypedDict, total=False): + volume_id: Required[str] + + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/volumes/action_retrieve_response.py b/src/gradientai/types/gpu_droplets/volumes/action_retrieve_response.py new file mode 100644 index 00000000..cd47f37e --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/action_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from .volume_action import VolumeAction + +__all__ = ["ActionRetrieveResponse"] + + +class ActionRetrieveResponse(BaseModel): + action: Optional[VolumeAction] = None diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_create_params.py b/src/gradientai/types/gpu_droplets/volumes/snapshot_create_params.py new file mode 100644 index 00000000..8cce4a59 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/snapshot_create_params.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Optional +from typing_extensions import Required, TypedDict + +__all__ = ["SnapshotCreateParams"] + + +class SnapshotCreateParams(TypedDict, total=False): + name: Required[str] + """A human-readable name for the volume snapshot.""" + + tags: Optional[List[str]] + """A flat array of tag names as strings to be applied to the resource. + + Tag names may be for either existing or new tags. + + Requires `tag:create` scope. 
+ """ diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_create_response.py b/src/gradientai/types/gpu_droplets/volumes/snapshot_create_response.py new file mode 100644 index 00000000..41701795 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/snapshot_create_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from ...shared.snapshots import Snapshots + +__all__ = ["SnapshotCreateResponse"] + + +class SnapshotCreateResponse(BaseModel): + snapshot: Optional[Snapshots] = None diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_list_params.py b/src/gradientai/types/gpu_droplets/volumes/snapshot_list_params.py new file mode 100644 index 00000000..65221a79 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/snapshot_list_params.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypedDict + +__all__ = ["SnapshotListParams"] + + +class SnapshotListParams(TypedDict, total=False): + page: int + """Which 'page' of paginated results to return.""" + + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_list_response.py b/src/gradientai/types/gpu_droplets/volumes/snapshot_list_response.py new file mode 100644 index 00000000..25d91ed2 --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/snapshot_list_response.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional + +from ...._models import BaseModel +from ...shared.snapshots import Snapshots +from ...shared.page_links import PageLinks +from ...shared.meta_properties import MetaProperties + +__all__ = ["SnapshotListResponse"] + + +class SnapshotListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + + links: Optional[PageLinks] = None + + snapshots: Optional[List[Snapshots]] = None diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py b/src/gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py new file mode 100644 index 00000000..3defa47d --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...._models import BaseModel +from ...shared.snapshots import Snapshots + +__all__ = ["SnapshotRetrieveResponse"] + + +class SnapshotRetrieveResponse(BaseModel): + snapshot: Optional[Snapshots] = None diff --git a/src/gradientai/types/gpu_droplets/volumes/volume_action.py b/src/gradientai/types/gpu_droplets/volumes/volume_action.py new file mode 100644 index 00000000..e1c01f6c --- /dev/null +++ b/src/gradientai/types/gpu_droplets/volumes/volume_action.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ...shared.action import Action + +__all__ = ["VolumeAction"] + + +class VolumeAction(Action): + resource_id: Optional[int] = None # type: ignore + + type: Optional[str] = None # type: ignore + """This is the type of action that the object represents. + + For example, this could be "attach_volume" to represent the state of a volume + attach action. 
+ """ diff --git a/src/gradientai/types/inference/api_key_create_params.py b/src/gradientai/types/inference/api_key_create_params.py index 16cc23c9..10edfbbe 100644 --- a/src/gradientai/types/inference/api_key_create_params.py +++ b/src/gradientai/types/inference/api_key_create_params.py @@ -9,3 +9,4 @@ class APIKeyCreateParams(TypedDict, total=False): name: str + """A human friendly name to identify the key""" diff --git a/src/gradientai/types/inference/api_key_create_response.py b/src/gradientai/types/inference/api_key_create_response.py index 654e9f1e..f2469e43 100644 --- a/src/gradientai/types/inference/api_key_create_response.py +++ b/src/gradientai/types/inference/api_key_create_response.py @@ -10,3 +10,4 @@ class APIKeyCreateResponse(BaseModel): api_key_info: Optional[APIModelAPIKeyInfo] = None + """Model API Key Info""" diff --git a/src/gradientai/types/inference/api_key_delete_response.py b/src/gradientai/types/inference/api_key_delete_response.py index 4d81d047..89102258 100644 --- a/src/gradientai/types/inference/api_key_delete_response.py +++ b/src/gradientai/types/inference/api_key_delete_response.py @@ -10,3 +10,4 @@ class APIKeyDeleteResponse(BaseModel): api_key_info: Optional[APIModelAPIKeyInfo] = None + """Model API Key Info""" diff --git a/src/gradientai/types/inference/api_key_list_params.py b/src/gradientai/types/inference/api_key_list_params.py index 11da9398..1f8f96b7 100644 --- a/src/gradientai/types/inference/api_key_list_params.py +++ b/src/gradientai/types/inference/api_key_list_params.py @@ -9,7 +9,7 @@ class APIKeyListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/inference/api_key_list_response.py b/src/gradientai/types/inference/api_key_list_response.py index 3e937950..7c474873 100644 --- a/src/gradientai/types/inference/api_key_list_response.py +++ 
b/src/gradientai/types/inference/api_key_list_response.py @@ -12,7 +12,10 @@ class APIKeyListResponse(BaseModel): api_key_infos: Optional[List[APIModelAPIKeyInfo]] = None + """Api key infos""" links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/gradientai/types/inference/api_key_update_params.py b/src/gradientai/types/inference/api_key_update_params.py index 23c1c0b9..7f79240a 100644 --- a/src/gradientai/types/inference/api_key_update_params.py +++ b/src/gradientai/types/inference/api_key_update_params.py @@ -11,5 +11,7 @@ class APIKeyUpdateParams(TypedDict, total=False): body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + """API key ID""" name: str + """Name""" diff --git a/src/gradientai/types/inference/api_key_update_regenerate_response.py b/src/gradientai/types/inference/api_key_update_regenerate_response.py index 44a316dc..c7ce5f0a 100644 --- a/src/gradientai/types/inference/api_key_update_regenerate_response.py +++ b/src/gradientai/types/inference/api_key_update_regenerate_response.py @@ -10,3 +10,4 @@ class APIKeyUpdateRegenerateResponse(BaseModel): api_key_info: Optional[APIModelAPIKeyInfo] = None + """Model API Key Info""" diff --git a/src/gradientai/types/inference/api_key_update_response.py b/src/gradientai/types/inference/api_key_update_response.py index 3671addf..1b7f92ef 100644 --- a/src/gradientai/types/inference/api_key_update_response.py +++ b/src/gradientai/types/inference/api_key_update_response.py @@ -10,3 +10,4 @@ class APIKeyUpdateResponse(BaseModel): api_key_info: Optional[APIModelAPIKeyInfo] = None + """Model API Key Info""" diff --git a/src/gradientai/types/inference/api_model_api_key_info.py b/src/gradientai/types/inference/api_model_api_key_info.py index bf354a47..3da1c70a 100644 --- a/src/gradientai/types/inference/api_model_api_key_info.py +++ b/src/gradientai/types/inference/api_model_api_key_info.py @@ -10,13 
+10,18 @@ class APIModelAPIKeyInfo(BaseModel): created_at: Optional[datetime] = None + """Creation date""" created_by: Optional[str] = None + """Created by""" deleted_at: Optional[datetime] = None + """Deleted date""" name: Optional[str] = None + """Name""" secret_key: Optional[str] = None uuid: Optional[str] = None + """Uuid""" diff --git a/src/gradientai/types/knowledge_base_create_params.py b/src/gradientai/types/knowledge_base_create_params.py index acf52e30..9ecd777d 100644 --- a/src/gradientai/types/knowledge_base_create_params.py +++ b/src/gradientai/types/knowledge_base_create_params.py @@ -48,14 +48,18 @@ class KnowledgeBaseCreateParams(TypedDict, total=False): """Tags to organize your knowledge base.""" vpc_uuid: str + """The VPC to deploy the knowledge base database in""" class Datasource(TypedDict, total=False): aws_data_source: AwsDataSourceParam + """AWS S3 Data Source""" bucket_name: str + """Deprecated, moved to data_source_details""" bucket_region: str + """Deprecated, moved to data_source_details""" file_upload_data_source: APIFileUploadDataSourceParam """File to upload as data source for knowledge base.""" @@ -63,5 +67,7 @@ class Datasource(TypedDict, total=False): item_path: str spaces_data_source: APISpacesDataSourceParam + """Spaces Bucket Data Source""" web_crawler_data_source: APIWebCrawlerDataSourceParam + """WebCrawlerDataSource""" diff --git a/src/gradientai/types/knowledge_base_create_response.py b/src/gradientai/types/knowledge_base_create_response.py index cc2d8b9f..6d846fa5 100644 --- a/src/gradientai/types/knowledge_base_create_response.py +++ b/src/gradientai/types/knowledge_base_create_response.py @@ -10,3 +10,4 @@ class KnowledgeBaseCreateResponse(BaseModel): knowledge_base: Optional[APIKnowledgeBase] = None + """Knowledgebase Description""" diff --git a/src/gradientai/types/knowledge_base_delete_response.py b/src/gradientai/types/knowledge_base_delete_response.py index 6401e25a..b0605a20 100644 --- 
a/src/gradientai/types/knowledge_base_delete_response.py +++ b/src/gradientai/types/knowledge_base_delete_response.py @@ -9,3 +9,4 @@ class KnowledgeBaseDeleteResponse(BaseModel): uuid: Optional[str] = None + """The id of the deleted knowledge base""" diff --git a/src/gradientai/types/knowledge_base_list_params.py b/src/gradientai/types/knowledge_base_list_params.py index dcf9a0ec..b2c0eb31 100644 --- a/src/gradientai/types/knowledge_base_list_params.py +++ b/src/gradientai/types/knowledge_base_list_params.py @@ -9,7 +9,7 @@ class KnowledgeBaseListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/knowledge_base_list_response.py b/src/gradientai/types/knowledge_base_list_response.py index e8998b51..08227316 100644 --- a/src/gradientai/types/knowledge_base_list_response.py +++ b/src/gradientai/types/knowledge_base_list_response.py @@ -12,7 +12,10 @@ class KnowledgeBaseListResponse(BaseModel): knowledge_bases: Optional[List[APIKnowledgeBase]] = None + """The knowledge bases""" links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/gradientai/types/knowledge_base_retrieve_response.py b/src/gradientai/types/knowledge_base_retrieve_response.py index 5a3b5f2c..55994f70 100644 --- a/src/gradientai/types/knowledge_base_retrieve_response.py +++ b/src/gradientai/types/knowledge_base_retrieve_response.py @@ -28,3 +28,4 @@ class KnowledgeBaseRetrieveResponse(BaseModel): ] = None knowledge_base: Optional[APIKnowledgeBase] = None + """Knowledgebase Description""" diff --git a/src/gradientai/types/knowledge_base_update_params.py b/src/gradientai/types/knowledge_base_update_params.py index 297c79de..7a86b40c 100644 --- a/src/gradientai/types/knowledge_base_update_params.py +++ b/src/gradientai/types/knowledge_base_update_params.py @@ -12,16 +12,19 @@ class 
KnowledgeBaseUpdateParams(TypedDict, total=False): database_id: str - """the id of the DigitalOcean database this knowledge base will use, optiona.""" + """The id of the DigitalOcean database this knowledge base will use, optiona.""" embedding_model_uuid: str """Identifier for the foundation model.""" name: str + """Knowledge base name""" project_id: str + """The id of the DigitalOcean project this knowledge base will belong to""" tags: List[str] """Tags to organize your knowledge base.""" body_uuid: Annotated[str, PropertyInfo(alias="uuid")] + """Knowledge base id""" diff --git a/src/gradientai/types/knowledge_base_update_response.py b/src/gradientai/types/knowledge_base_update_response.py index f3ba2c32..0840622c 100644 --- a/src/gradientai/types/knowledge_base_update_response.py +++ b/src/gradientai/types/knowledge_base_update_response.py @@ -10,3 +10,4 @@ class KnowledgeBaseUpdateResponse(BaseModel): knowledge_base: Optional[APIKnowledgeBase] = None + """Knowledgebase Description""" diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py b/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py index 1dcc9639..a1c23e09 100644 --- a/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py +++ b/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py @@ -9,7 +9,10 @@ class APIFileUploadDataSource(BaseModel): original_file_name: Optional[str] = None + """The original file name""" size_in_bytes: Optional[str] = None + """The size of the file in bytes""" stored_object_key: Optional[str] = None + """The object key the file was stored as""" diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py b/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py index 37221059..562f8a34 100644 --- a/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py +++ b/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py @@ -9,7 +9,10 @@ 
class APIFileUploadDataSourceParam(TypedDict, total=False): original_file_name: str + """The original file name""" size_in_bytes: str + """The size of the file in bytes""" stored_object_key: str + """The object key the file was stored as""" diff --git a/src/gradientai/types/knowledge_bases/api_indexed_data_source.py b/src/gradientai/types/knowledge_bases/api_indexed_data_source.py index 2449e9fd..151b29de 100644 --- a/src/gradientai/types/knowledge_bases/api_indexed_data_source.py +++ b/src/gradientai/types/knowledge_bases/api_indexed_data_source.py @@ -11,24 +11,34 @@ class APIIndexedDataSource(BaseModel): completed_at: Optional[datetime] = None + """Timestamp when data source completed indexing""" data_source_uuid: Optional[str] = None + """Uuid of the indexed data source""" error_details: Optional[str] = None + """A detailed error description""" error_msg: Optional[str] = None + """A string code provinding a hint which part of the system experienced an error""" failed_item_count: Optional[str] = None + """Total count of files that have failed""" indexed_file_count: Optional[str] = None + """Total count of files that have been indexed""" indexed_item_count: Optional[str] = None + """Total count of files that have been indexed""" removed_item_count: Optional[str] = None + """Total count of files that have been removed""" skipped_item_count: Optional[str] = None + """Total count of files that have been skipped""" started_at: Optional[datetime] = None + """Timestamp when data source started indexing""" status: Optional[ Literal[ @@ -42,7 +52,10 @@ class APIIndexedDataSource(BaseModel): ] = None total_bytes: Optional[str] = None + """Total size of files in data source in bytes""" total_bytes_indexed: Optional[str] = None + """Total size of files in data source in bytes that have been indexed""" total_file_count: Optional[str] = None + """Total file count in the data source""" diff --git a/src/gradientai/types/knowledge_bases/api_indexing_job.py 
b/src/gradientai/types/knowledge_bases/api_indexing_job.py index 573a7c4e..240fd709 100644 --- a/src/gradientai/types/knowledge_bases/api_indexing_job.py +++ b/src/gradientai/types/knowledge_bases/api_indexing_job.py @@ -11,14 +11,17 @@ class APIIndexingJob(BaseModel): completed_datasources: Optional[int] = None + """Number of datasources indexed completed""" created_at: Optional[datetime] = None + """Creation date / time""" data_source_uuids: Optional[List[str]] = None finished_at: Optional[datetime] = None knowledge_base_uuid: Optional[str] = None + """Knowledge base id""" phase: Optional[ Literal[ @@ -47,9 +50,13 @@ class APIIndexingJob(BaseModel): ] = None tokens: Optional[int] = None + """Number of tokens""" total_datasources: Optional[int] = None + """Number of datasources being indexed""" updated_at: Optional[datetime] = None + """Last modified""" uuid: Optional[str] = None + """Unique id""" diff --git a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py index 202e4202..a4d695d2 100644 --- a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py +++ b/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py @@ -15,34 +15,46 @@ class AwsDataSource(BaseModel): bucket_name: Optional[str] = None + """Spaces bucket name""" item_path: Optional[str] = None region: Optional[str] = None + """Region of bucket""" class APIKnowledgeBaseDataSource(BaseModel): aws_data_source: Optional[AwsDataSource] = None + """AWS S3 Data Source for Display""" bucket_name: Optional[str] = None + """Name of storage bucket - Deprecated, moved to data_source_details""" created_at: Optional[datetime] = None + """Creation date / time""" file_upload_data_source: Optional[APIFileUploadDataSource] = None """File to upload as data source for knowledge base.""" item_path: Optional[str] = None + """Path of folder or object in bucket - Deprecated, moved to 
data_source_details""" last_datasource_indexing_job: Optional[APIIndexedDataSource] = None last_indexing_job: Optional[APIIndexingJob] = None + """IndexingJob description""" region: Optional[str] = None + """Region code - Deprecated, moved to data_source_details""" spaces_data_source: Optional[APISpacesDataSource] = None + """Spaces Bucket Data Source""" updated_at: Optional[datetime] = None + """Last modified""" uuid: Optional[str] = None + """Unique id of knowledge base""" web_crawler_data_source: Optional[APIWebCrawlerDataSource] = None + """WebCrawlerDataSource""" diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source.py b/src/gradientai/types/knowledge_bases/api_spaces_data_source.py index f3a0421a..02aa479a 100644 --- a/src/gradientai/types/knowledge_bases/api_spaces_data_source.py +++ b/src/gradientai/types/knowledge_bases/api_spaces_data_source.py @@ -9,7 +9,9 @@ class APISpacesDataSource(BaseModel): bucket_name: Optional[str] = None + """Spaces bucket name""" item_path: Optional[str] = None region: Optional[str] = None + """Region of bucket""" diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py b/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py index b7f2f657..5eaeb0ad 100644 --- a/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py +++ b/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py @@ -9,7 +9,9 @@ class APISpacesDataSourceParam(TypedDict, total=False): bucket_name: str + """Spaces bucket name""" item_path: str region: str + """Region of bucket""" diff --git a/src/gradientai/types/knowledge_bases/aws_data_source_param.py b/src/gradientai/types/knowledge_bases/aws_data_source_param.py index 93d49228..912e3e29 100644 --- a/src/gradientai/types/knowledge_bases/aws_data_source_param.py +++ b/src/gradientai/types/knowledge_bases/aws_data_source_param.py @@ -9,11 +9,15 @@ class AwsDataSourceParam(TypedDict, total=False): bucket_name: str + """Spaces 
bucket name""" item_path: str key_id: str + """The AWS Key ID""" region: str + """Region of bucket""" secret_key: str + """The AWS Secret Key""" diff --git a/src/gradientai/types/knowledge_bases/data_source_create_params.py b/src/gradientai/types/knowledge_bases/data_source_create_params.py index 22bd76e7..ac3aa93c 100644 --- a/src/gradientai/types/knowledge_bases/data_source_create_params.py +++ b/src/gradientai/types/knowledge_bases/data_source_create_params.py @@ -14,9 +14,13 @@ class DataSourceCreateParams(TypedDict, total=False): aws_data_source: AwsDataSourceParam + """AWS S3 Data Source""" body_knowledge_base_uuid: Annotated[str, PropertyInfo(alias="knowledge_base_uuid")] + """Knowledge base id""" spaces_data_source: APISpacesDataSourceParam + """Spaces Bucket Data Source""" web_crawler_data_source: APIWebCrawlerDataSourceParam + """WebCrawlerDataSource""" diff --git a/src/gradientai/types/knowledge_bases/data_source_create_response.py b/src/gradientai/types/knowledge_bases/data_source_create_response.py index 1035d3f4..76ec88e2 100644 --- a/src/gradientai/types/knowledge_bases/data_source_create_response.py +++ b/src/gradientai/types/knowledge_bases/data_source_create_response.py @@ -10,3 +10,4 @@ class DataSourceCreateResponse(BaseModel): knowledge_base_data_source: Optional[APIKnowledgeBaseDataSource] = None + """Data Source configuration for Knowledge Bases""" diff --git a/src/gradientai/types/knowledge_bases/data_source_delete_response.py b/src/gradientai/types/knowledge_bases/data_source_delete_response.py index 53954d7f..eaad72ff 100644 --- a/src/gradientai/types/knowledge_bases/data_source_delete_response.py +++ b/src/gradientai/types/knowledge_bases/data_source_delete_response.py @@ -9,5 +9,7 @@ class DataSourceDeleteResponse(BaseModel): data_source_uuid: Optional[str] = None + """Data source id""" knowledge_base_uuid: Optional[str] = None + """Knowledge base id""" diff --git a/src/gradientai/types/knowledge_bases/data_source_list_params.py 
b/src/gradientai/types/knowledge_bases/data_source_list_params.py index e3ed5e3c..089eb291 100644 --- a/src/gradientai/types/knowledge_bases/data_source_list_params.py +++ b/src/gradientai/types/knowledge_bases/data_source_list_params.py @@ -9,7 +9,7 @@ class DataSourceListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/knowledge_bases/data_source_list_response.py b/src/gradientai/types/knowledge_bases/data_source_list_response.py index 2e5fc517..f05a49bc 100644 --- a/src/gradientai/types/knowledge_bases/data_source_list_response.py +++ b/src/gradientai/types/knowledge_bases/data_source_list_response.py @@ -12,7 +12,10 @@ class DataSourceListResponse(BaseModel): knowledge_base_data_sources: Optional[List[APIKnowledgeBaseDataSource]] = None + """The data sources""" links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/gradientai/types/knowledge_bases/indexing_job_create_params.py b/src/gradientai/types/knowledge_bases/indexing_job_create_params.py index 04838472..d92c5790 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_create_params.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_create_params.py @@ -10,5 +10,10 @@ class IndexingJobCreateParams(TypedDict, total=False): data_source_uuids: List[str] + """ + List of data source ids to index, if none are provided, all data sources will be + indexed + """ knowledge_base_uuid: str + """Knowledge base id""" diff --git a/src/gradientai/types/knowledge_bases/indexing_job_create_response.py b/src/gradientai/types/knowledge_bases/indexing_job_create_response.py index 835ec60d..685f40ef 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_create_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_create_response.py @@ -10,3 +10,4 @@ class 
IndexingJobCreateResponse(BaseModel): job: Optional[APIIndexingJob] = None + """IndexingJob description""" diff --git a/src/gradientai/types/knowledge_bases/indexing_job_list_params.py b/src/gradientai/types/knowledge_bases/indexing_job_list_params.py index 90206aba..c9ac560e 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_list_params.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_list_params.py @@ -9,7 +9,7 @@ class IndexingJobListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/knowledge_bases/indexing_job_list_response.py b/src/gradientai/types/knowledge_bases/indexing_job_list_response.py index deea4562..371f51bb 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_list_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_list_response.py @@ -12,7 +12,10 @@ class IndexingJobListResponse(BaseModel): jobs: Optional[List[APIIndexingJob]] = None + """The indexing jobs""" links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py index 6034bdf1..2d6be855 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py +++ b/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py @@ -10,3 +10,4 @@ class IndexingJobRetrieveResponse(BaseModel): job: Optional[APIIndexingJob] = None + """IndexingJob description""" diff --git a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py index ae4b394f..9fd41764 100644 --- a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py +++ 
b/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py @@ -10,3 +10,4 @@ class IndexingJobUpdateCancelResponse(BaseModel): job: Optional[APIIndexingJob] = None + """IndexingJob description""" diff --git a/src/gradientai/types/model_list_response.py b/src/gradientai/types/model_list_response.py index 47651759..5915bdd1 100644 --- a/src/gradientai/types/model_list_response.py +++ b/src/gradientai/types/model_list_response.py @@ -1,18 +1,28 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Optional +from typing import List +from typing_extensions import Literal from .._models import BaseModel -from .api_model import APIModel -from .shared.api_meta import APIMeta -from .shared.api_links import APILinks -__all__ = ["ModelListResponse"] +__all__ = ["ModelListResponse", "Data"] -class ModelListResponse(BaseModel): - links: Optional[APILinks] = None +class Data(BaseModel): + id: str + """The model identifier, which can be referenced in the API endpoints.""" + + created: int + """The Unix timestamp (in seconds) when the model was created.""" + + object: Literal["model"] + """The object type, which is always "model".""" - meta: Optional[APIMeta] = None + owned_by: str + """The organization that owns the model.""" + + +class ModelListResponse(BaseModel): + data: List[Data] - models: Optional[List[APIModel]] = None + object: Literal["list"] diff --git a/src/gradientai/types/model_retrieve_response.py b/src/gradientai/types/model_retrieve_response.py new file mode 100644 index 00000000..dd5de863 --- /dev/null +++ b/src/gradientai/types/model_retrieve_response.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["ModelRetrieveResponse"] + + +class ModelRetrieveResponse(BaseModel): + id: str + """The model identifier, which can be referenced in the API endpoints.""" + + created: int + """The Unix timestamp (in seconds) when the model was created.""" + + object: Literal["model"] + """The object type, which is always "model".""" + + owned_by: str + """The organization that owns the model.""" diff --git a/src/gradientai/types/models/providers/anthropic_create_params.py b/src/gradientai/types/models/providers/anthropic_create_params.py index b624121f..c9fd6e85 100644 --- a/src/gradientai/types/models/providers/anthropic_create_params.py +++ b/src/gradientai/types/models/providers/anthropic_create_params.py @@ -9,5 +9,7 @@ class AnthropicCreateParams(TypedDict, total=False): api_key: str + """Anthropic API key""" name: str + """Name of the key""" diff --git a/src/gradientai/types/models/providers/anthropic_create_response.py b/src/gradientai/types/models/providers/anthropic_create_response.py index f0b8d2d1..0fbe50bc 100644 --- a/src/gradientai/types/models/providers/anthropic_create_response.py +++ b/src/gradientai/types/models/providers/anthropic_create_response.py @@ -10,3 +10,4 @@ class AnthropicCreateResponse(BaseModel): api_key_info: Optional[APIAnthropicAPIKeyInfo] = None + """Anthropic API Key Info""" diff --git a/src/gradientai/types/models/providers/anthropic_delete_response.py b/src/gradientai/types/models/providers/anthropic_delete_response.py index a3842bbc..b4fdd978 100644 --- a/src/gradientai/types/models/providers/anthropic_delete_response.py +++ b/src/gradientai/types/models/providers/anthropic_delete_response.py @@ -10,3 +10,4 @@ class AnthropicDeleteResponse(BaseModel): api_key_info: Optional[APIAnthropicAPIKeyInfo] = None + """Anthropic API Key Info""" diff --git a/src/gradientai/types/models/providers/anthropic_list_agents_params.py 
b/src/gradientai/types/models/providers/anthropic_list_agents_params.py index 1a5b8229..b3308b69 100644 --- a/src/gradientai/types/models/providers/anthropic_list_agents_params.py +++ b/src/gradientai/types/models/providers/anthropic_list_agents_params.py @@ -9,7 +9,7 @@ class AnthropicListAgentsParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/models/providers/anthropic_list_agents_response.py b/src/gradientai/types/models/providers/anthropic_list_agents_response.py index 6816f0db..a1525275 100644 --- a/src/gradientai/types/models/providers/anthropic_list_agents_response.py +++ b/src/gradientai/types/models/providers/anthropic_list_agents_response.py @@ -15,8 +15,10 @@ class AnthropicListAgentsResponse(BaseModel): agents: Optional[List["APIAgent"]] = None links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" from ...api_agent import APIAgent diff --git a/src/gradientai/types/models/providers/anthropic_list_params.py b/src/gradientai/types/models/providers/anthropic_list_params.py index de8ce520..ae1cca58 100644 --- a/src/gradientai/types/models/providers/anthropic_list_params.py +++ b/src/gradientai/types/models/providers/anthropic_list_params.py @@ -9,7 +9,7 @@ class AnthropicListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/models/providers/anthropic_list_response.py b/src/gradientai/types/models/providers/anthropic_list_response.py index 77999f5b..24d6547a 100644 --- a/src/gradientai/types/models/providers/anthropic_list_response.py +++ b/src/gradientai/types/models/providers/anthropic_list_response.py @@ -12,7 +12,10 @@ class AnthropicListResponse(BaseModel): api_key_infos: Optional[List[APIAnthropicAPIKeyInfo]] = None 
+ """Api key infos""" links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/gradientai/types/models/providers/anthropic_retrieve_response.py b/src/gradientai/types/models/providers/anthropic_retrieve_response.py index 7083b75f..61324b7d 100644 --- a/src/gradientai/types/models/providers/anthropic_retrieve_response.py +++ b/src/gradientai/types/models/providers/anthropic_retrieve_response.py @@ -10,3 +10,4 @@ class AnthropicRetrieveResponse(BaseModel): api_key_info: Optional[APIAnthropicAPIKeyInfo] = None + """Anthropic API Key Info""" diff --git a/src/gradientai/types/models/providers/anthropic_update_params.py b/src/gradientai/types/models/providers/anthropic_update_params.py index 7bb03045..865dc29c 100644 --- a/src/gradientai/types/models/providers/anthropic_update_params.py +++ b/src/gradientai/types/models/providers/anthropic_update_params.py @@ -11,7 +11,10 @@ class AnthropicUpdateParams(TypedDict, total=False): api_key: str + """Anthropic API key""" body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + """API key ID""" name: str + """Name of the key""" diff --git a/src/gradientai/types/models/providers/anthropic_update_response.py b/src/gradientai/types/models/providers/anthropic_update_response.py index d3b2911b..3a6daaea 100644 --- a/src/gradientai/types/models/providers/anthropic_update_response.py +++ b/src/gradientai/types/models/providers/anthropic_update_response.py @@ -10,3 +10,4 @@ class AnthropicUpdateResponse(BaseModel): api_key_info: Optional[APIAnthropicAPIKeyInfo] = None + """Anthropic API Key Info""" diff --git a/src/gradientai/types/models/providers/openai_create_params.py b/src/gradientai/types/models/providers/openai_create_params.py index da655d75..8ed7f571 100644 --- a/src/gradientai/types/models/providers/openai_create_params.py +++ b/src/gradientai/types/models/providers/openai_create_params.py @@ -9,5 +9,7 @@ class 
OpenAICreateParams(TypedDict, total=False): api_key: str + """OpenAI API key""" name: str + """Name of the key""" diff --git a/src/gradientai/types/models/providers/openai_create_response.py b/src/gradientai/types/models/providers/openai_create_response.py index 4908a91a..b2e94766 100644 --- a/src/gradientai/types/models/providers/openai_create_response.py +++ b/src/gradientai/types/models/providers/openai_create_response.py @@ -10,3 +10,4 @@ class OpenAICreateResponse(BaseModel): api_key_info: Optional[APIOpenAIAPIKeyInfo] = None + """OpenAI API Key Info""" diff --git a/src/gradientai/types/models/providers/openai_delete_response.py b/src/gradientai/types/models/providers/openai_delete_response.py index 080a251f..e59c89fe 100644 --- a/src/gradientai/types/models/providers/openai_delete_response.py +++ b/src/gradientai/types/models/providers/openai_delete_response.py @@ -10,3 +10,4 @@ class OpenAIDeleteResponse(BaseModel): api_key_info: Optional[APIOpenAIAPIKeyInfo] = None + """OpenAI API Key Info""" diff --git a/src/gradientai/types/models/providers/openai_list_params.py b/src/gradientai/types/models/providers/openai_list_params.py index e5b86b8d..5677eeaf 100644 --- a/src/gradientai/types/models/providers/openai_list_params.py +++ b/src/gradientai/types/models/providers/openai_list_params.py @@ -9,7 +9,7 @@ class OpenAIListParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/models/providers/openai_list_response.py b/src/gradientai/types/models/providers/openai_list_response.py index edbd9fb4..698cd11e 100644 --- a/src/gradientai/types/models/providers/openai_list_response.py +++ b/src/gradientai/types/models/providers/openai_list_response.py @@ -12,7 +12,10 @@ class OpenAIListResponse(BaseModel): api_key_infos: Optional[List[APIOpenAIAPIKeyInfo]] = None + """Api key infos""" links: Optional[APILinks] = None + """Links to other pages""" 
meta: Optional[APIMeta] = None + """Meta information about the data set""" diff --git a/src/gradientai/types/models/providers/openai_retrieve_agents_params.py b/src/gradientai/types/models/providers/openai_retrieve_agents_params.py index 8a41eaf9..2db6d7a1 100644 --- a/src/gradientai/types/models/providers/openai_retrieve_agents_params.py +++ b/src/gradientai/types/models/providers/openai_retrieve_agents_params.py @@ -9,7 +9,7 @@ class OpenAIRetrieveAgentsParams(TypedDict, total=False): page: int - """page number.""" + """Page number.""" per_page: int - """items per page.""" + """Items per page.""" diff --git a/src/gradientai/types/models/providers/openai_retrieve_agents_response.py b/src/gradientai/types/models/providers/openai_retrieve_agents_response.py index b3166636..717a56cd 100644 --- a/src/gradientai/types/models/providers/openai_retrieve_agents_response.py +++ b/src/gradientai/types/models/providers/openai_retrieve_agents_response.py @@ -15,8 +15,10 @@ class OpenAIRetrieveAgentsResponse(BaseModel): agents: Optional[List["APIAgent"]] = None links: Optional[APILinks] = None + """Links to other pages""" meta: Optional[APIMeta] = None + """Meta information about the data set""" from ...api_agent import APIAgent diff --git a/src/gradientai/types/models/providers/openai_retrieve_response.py b/src/gradientai/types/models/providers/openai_retrieve_response.py index ef23b966..0f382073 100644 --- a/src/gradientai/types/models/providers/openai_retrieve_response.py +++ b/src/gradientai/types/models/providers/openai_retrieve_response.py @@ -10,3 +10,4 @@ class OpenAIRetrieveResponse(BaseModel): api_key_info: Optional[APIOpenAIAPIKeyInfo] = None + """OpenAI API Key Info""" diff --git a/src/gradientai/types/models/providers/openai_update_params.py b/src/gradientai/types/models/providers/openai_update_params.py index ab5d02cf..9b99495e 100644 --- a/src/gradientai/types/models/providers/openai_update_params.py +++ 
b/src/gradientai/types/models/providers/openai_update_params.py @@ -11,7 +11,10 @@ class OpenAIUpdateParams(TypedDict, total=False): api_key: str + """OpenAI API key""" body_api_key_uuid: Annotated[str, PropertyInfo(alias="api_key_uuid")] + """API key ID""" name: str + """Name of the key""" diff --git a/src/gradientai/types/models/providers/openai_update_response.py b/src/gradientai/types/models/providers/openai_update_response.py index 9bb80518..ec7a1c94 100644 --- a/src/gradientai/types/models/providers/openai_update_response.py +++ b/src/gradientai/types/models/providers/openai_update_response.py @@ -10,3 +10,4 @@ class OpenAIUpdateResponse(BaseModel): api_key_info: Optional[APIOpenAIAPIKeyInfo] = None + """OpenAI API Key Info""" diff --git a/src/gradientai/types/region_list_params.py b/src/gradientai/types/region_list_params.py index 1db0ad50..4fef37b3 100644 --- a/src/gradientai/types/region_list_params.py +++ b/src/gradientai/types/region_list_params.py @@ -8,8 +8,8 @@ class RegionListParams(TypedDict, total=False): - serves_batch: bool - """include datacenters that are capable of running batch jobs.""" + page: int + """Which 'page' of paginated results to return.""" - serves_inference: bool - """include datacenters that serve inference.""" + per_page: int + """Number of items returned per page""" diff --git a/src/gradientai/types/region_list_response.py b/src/gradientai/types/region_list_response.py index 0f955b36..f1bf4c69 100644 --- a/src/gradientai/types/region_list_response.py +++ b/src/gradientai/types/region_list_response.py @@ -3,21 +3,17 @@ from typing import List, Optional from .._models import BaseModel +from .shared.region import Region +from .shared.page_links import PageLinks +from .shared.meta_properties import MetaProperties -__all__ = ["RegionListResponse", "Region"] +__all__ = ["RegionListResponse"] -class Region(BaseModel): - inference_url: Optional[str] = None - - region: Optional[str] = None - - serves_batch: Optional[bool] = None - - 
serves_inference: Optional[bool] = None - - stream_inference_url: Optional[str] = None +class RegionListResponse(BaseModel): + meta: MetaProperties + """Information about the response itself.""" + regions: List[Region] -class RegionListResponse(BaseModel): - regions: Optional[List[Region]] = None + links: Optional[PageLinks] = None diff --git a/src/gradientai/types/shared/__init__.py b/src/gradientai/types/shared/__init__.py index 9fdd7605..6d90845f 100644 --- a/src/gradientai/types/shared/__init__.py +++ b/src/gradientai/types/shared/__init__.py @@ -1,6 +1,29 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from .size import Size as Size +from .image import Image as Image +from .action import Action as Action +from .kernel import Kernel as Kernel +from .region import Region as Region +from .droplet import Droplet as Droplet from .api_meta import APIMeta as APIMeta +from .gpu_info import GPUInfo as GPUInfo from .api_links import APILinks as APILinks +from .disk_info import DiskInfo as DiskInfo +from .snapshots import Snapshots as Snapshots +from .network_v4 import NetworkV4 as NetworkV4 +from .network_v6 import NetworkV6 as NetworkV6 +from .page_links import PageLinks as PageLinks +from .action_link import ActionLink as ActionLink +from .vpc_peering import VpcPeering as VpcPeering +from .subscription import Subscription as Subscription +from .forward_links import ForwardLinks as ForwardLinks +from .backward_links import BackwardLinks as BackwardLinks +from .meta_properties import MetaProperties as MetaProperties +from .completion_usage import CompletionUsage as CompletionUsage +from .garbage_collection import GarbageCollection as GarbageCollection +from .firewall_rule_target import FirewallRuleTarget as FirewallRuleTarget from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk +from .subscription_tier_base import SubscriptionTierBase as SubscriptionTierBase +from .droplet_next_backup_window import 
DropletNextBackupWindow as DropletNextBackupWindow from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob diff --git a/src/gradientai/types/shared/action.py b/src/gradientai/types/shared/action.py new file mode 100644 index 00000000..2b9fbf4e --- /dev/null +++ b/src/gradientai/types/shared/action.py @@ -0,0 +1,51 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from .region import Region +from ..._models import BaseModel + +__all__ = ["Action"] + + +class Action(BaseModel): + id: Optional[int] = None + """A unique numeric ID that can be used to identify and reference an action.""" + + completed_at: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format that represents when + the action was completed. + """ + + region: Optional[Region] = None + + region_slug: Optional[str] = None + """A human-readable string that is used as a unique identifier for each region.""" + + resource_id: Optional[int] = None + """A unique identifier for the resource that the action is associated with.""" + + resource_type: Optional[str] = None + """The type of resource that the action is associated with.""" + + started_at: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format that represents when + the action was initiated. + """ + + status: Optional[Literal["in-progress", "completed", "errored"]] = None + """The current status of the action. + + This can be "in-progress", "completed", or "errored". + """ + + type: Optional[str] = None + """This is the type of action that the object represents. + + For example, this could be "transfer" to represent the state of an image + transfer action. 
+ """ diff --git a/src/gradientai/types/shared/action_link.py b/src/gradientai/types/shared/action_link.py new file mode 100644 index 00000000..78aec9ff --- /dev/null +++ b/src/gradientai/types/shared/action_link.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["ActionLink"] + + +class ActionLink(BaseModel): + id: Optional[int] = None + """A unique numeric ID that can be used to identify and reference an action.""" + + href: Optional[str] = None + """A URL that can be used to access the action.""" + + rel: Optional[str] = None + """A string specifying the type of the related action.""" diff --git a/src/gradientai/types/shared/api_links.py b/src/gradientai/types/shared/api_links.py index b37113f0..24b19cfe 100644 --- a/src/gradientai/types/shared/api_links.py +++ b/src/gradientai/types/shared/api_links.py @@ -9,13 +9,18 @@ class Pages(BaseModel): first: Optional[str] = None + """First page""" last: Optional[str] = None + """Last page""" next: Optional[str] = None + """Next page""" previous: Optional[str] = None + """Previous page""" class APILinks(BaseModel): pages: Optional[Pages] = None + """Information about how to reach other pages""" diff --git a/src/gradientai/types/shared/api_meta.py b/src/gradientai/types/shared/api_meta.py index 9191812c..dc267527 100644 --- a/src/gradientai/types/shared/api_meta.py +++ b/src/gradientai/types/shared/api_meta.py @@ -9,7 +9,10 @@ class APIMeta(BaseModel): page: Optional[int] = None + """The current page""" pages: Optional[int] = None + """Total number of pages""" total: Optional[int] = None + """Total amount of items over all pages""" diff --git a/src/gradientai/types/shared/backward_links.py b/src/gradientai/types/shared/backward_links.py new file mode 100644 index 00000000..502fefef --- /dev/null +++ b/src/gradientai/types/shared/backward_links.py @@ -0,0 +1,15 @@ +# File generated from 
our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["BackwardLinks"] + + +class BackwardLinks(BaseModel): + first: Optional[str] = None + """URI of the first page of the results.""" + + prev: Optional[str] = None + """URI of the previous page of the results.""" diff --git a/src/gradientai/types/shared/chat_completion_chunk.py b/src/gradientai/types/shared/chat_completion_chunk.py index 4d45ef8d..4dd587f9 100644 --- a/src/gradientai/types/shared/chat_completion_chunk.py +++ b/src/gradientai/types/shared/chat_completion_chunk.py @@ -4,9 +4,43 @@ from typing_extensions import Literal from ..._models import BaseModel +from .completion_usage import CompletionUsage from .chat_completion_token_logprob import ChatCompletionTokenLogprob -__all__ = ["ChatCompletionChunk", "Choice", "ChoiceDelta", "ChoiceLogprobs", "Usage"] +__all__ = [ + "ChatCompletionChunk", + "Choice", + "ChoiceDelta", + "ChoiceDeltaToolCall", + "ChoiceDeltaToolCallFunction", + "ChoiceLogprobs", +] + + +class ChoiceDeltaToolCallFunction(BaseModel): + arguments: Optional[str] = None + """ + The arguments to call the function with, as generated by the model in JSON + format. Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. Validate the + arguments in your code before calling your function. + """ + + name: Optional[str] = None + """The name of the function to call.""" + + +class ChoiceDeltaToolCall(BaseModel): + index: int + + id: Optional[str] = None + """The ID of the tool call.""" + + function: Optional[ChoiceDeltaToolCallFunction] = None + """A chunk of a function that the model called.""" + + type: Optional[Literal["function"]] = None + """The type of the tool. 
Currently, only `function` is supported.""" class ChoiceDelta(BaseModel): @@ -19,6 +53,8 @@ class ChoiceDelta(BaseModel): role: Optional[Literal["developer", "user", "assistant"]] = None """The role of the author of this message.""" + tool_calls: Optional[List[ChoiceDeltaToolCall]] = None + class ChoiceLogprobs(BaseModel): content: Optional[List[ChatCompletionTokenLogprob]] = None @@ -32,12 +68,12 @@ class Choice(BaseModel): delta: ChoiceDelta """A chat completion delta generated by streamed model responses.""" - finish_reason: Optional[Literal["stop", "length"]] = None + finish_reason: Optional[Literal["stop", "length", "tool_calls"]] = None """The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, or `length` if the maximum number of tokens specified in the request - was reached + was reached, `tool_calls` if the model called a tool. """ index: int @@ -47,17 +83,6 @@ class Choice(BaseModel): """Log probability information for the choice.""" -class Usage(BaseModel): - completion_tokens: int - """Number of tokens in the generated completion.""" - - prompt_tokens: int - """Number of tokens in the prompt.""" - - total_tokens: int - """Total number of tokens used in the request (prompt + completion).""" - - class ChatCompletionChunk(BaseModel): id: str """A unique identifier for the chat completion. Each chunk has the same ID.""" @@ -81,7 +106,7 @@ class ChatCompletionChunk(BaseModel): object: Literal["chat.completion.chunk"] """The object type, which is always `chat.completion.chunk`.""" - usage: Optional[Usage] = None + usage: Optional[CompletionUsage] = None """ An optional field that will only be present when you set `stream_options: {"include_usage": true}` in your request. 
When present, it diff --git a/src/gradientai/types/shared/completion_usage.py b/src/gradientai/types/shared/completion_usage.py new file mode 100644 index 00000000..a2012eef --- /dev/null +++ b/src/gradientai/types/shared/completion_usage.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from ..._models import BaseModel + +__all__ = ["CompletionUsage"] + + +class CompletionUsage(BaseModel): + completion_tokens: int + """Number of tokens in the generated completion.""" + + prompt_tokens: int + """Number of tokens in the prompt.""" + + total_tokens: int + """Total number of tokens used in the request (prompt + completion).""" diff --git a/src/gradientai/types/shared/disk_info.py b/src/gradientai/types/shared/disk_info.py new file mode 100644 index 00000000..3c5c4911 --- /dev/null +++ b/src/gradientai/types/shared/disk_info.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["DiskInfo", "Size"] + + +class Size(BaseModel): + amount: Optional[int] = None + """The amount of space allocated to the disk.""" + + unit: Optional[str] = None + """The unit of measure for the disk size.""" + + +class DiskInfo(BaseModel): + size: Optional[Size] = None + + type: Optional[Literal["local", "scratch"]] = None + """The type of disk. + + All Droplets contain a `local` disk. Additionally, GPU Droplets can also have a + `scratch` disk for non-persistent data. + """ diff --git a/src/gradientai/types/shared/droplet.py b/src/gradientai/types/shared/droplet.py new file mode 100644 index 00000000..9d2bb17c --- /dev/null +++ b/src/gradientai/types/shared/droplet.py @@ -0,0 +1,143 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from .size import Size +from .image import Image +from .kernel import Kernel +from .region import Region +from .gpu_info import GPUInfo +from ..._models import BaseModel +from .disk_info import DiskInfo +from .network_v4 import NetworkV4 +from .network_v6 import NetworkV6 +from .droplet_next_backup_window import DropletNextBackupWindow + +__all__ = ["Droplet", "Networks"] + + +class Networks(BaseModel): + v4: Optional[List[NetworkV4]] = None + + v6: Optional[List[NetworkV6]] = None + + +class Droplet(BaseModel): + id: int + """A unique identifier for each Droplet instance. + + This is automatically generated upon Droplet creation. + """ + + backup_ids: List[int] + """ + An array of backup IDs of any backups that have been taken of the Droplet + instance. Droplet backups are enabled at the time of the instance creation. + Requires `image:read` scope. + """ + + created_at: datetime + """ + A time value given in ISO8601 combined date and time format that represents when + the Droplet was created. + """ + + disk: int + """The size of the Droplet's disk in gigabytes.""" + + features: List[str] + """An array of features enabled on this Droplet.""" + + image: Image + """The Droplet's image. Requires `image:read` scope.""" + + locked: bool + """ + A boolean value indicating whether the Droplet has been locked, preventing + actions by users. + """ + + memory: int + """Memory of the Droplet in megabytes.""" + + name: str + """The human-readable name set for the Droplet instance.""" + + networks: Networks + """The details of the network that are configured for the Droplet instance. + + This is an object that contains keys for IPv4 and IPv6. The value of each of + these is an array that contains objects describing an individual IP resource + allocated to the Droplet. 
These will define attributes like the IP address, + netmask, and gateway of the specific network depending on the type of network it + is. + """ + + next_backup_window: Optional[DropletNextBackupWindow] = None + """ + The details of the Droplet's backups feature, if backups are configured for the + Droplet. This object contains keys for the start and end times of the window + during which the backup will start. + """ + + region: Region + + size: Size + + size_slug: str + """The unique slug identifier for the size of this Droplet.""" + + snapshot_ids: List[int] + """ + An array of snapshot IDs of any snapshots created from the Droplet instance. + Requires `image:read` scope. + """ + + status: Literal["new", "active", "off", "archive"] + """A status string indicating the state of the Droplet instance. + + This may be "new", "active", "off", or "archive". + """ + + tags: List[str] + """An array of Tags the Droplet has been tagged with. Requires `tag:read` scope.""" + + vcpus: int + """The number of virtual CPUs.""" + + volume_ids: List[str] + """ + A flat array including the unique identifier for each Block Storage volume + attached to the Droplet. Requires `block_storage:read` scope. + """ + + disk_info: Optional[List[DiskInfo]] = None + """ + An array of objects containing information about the disks available to the + Droplet. + """ + + gpu_info: Optional[GPUInfo] = None + """ + An object containing information about the GPU capabilities of Droplets created + with this size. + """ + + kernel: Optional[Kernel] = None + """ + **Note**: All Droplets created after March 2017 use internal kernels by default. + These Droplets will have this attribute set to `null`. + + The current + [kernel](https://docs.digitalocean.com/products/droplets/how-to/kernel/) for + Droplets with externally managed kernels. This will initially be set to the + kernel of the base image when the Droplet is created. 
+ """ + + vpc_uuid: Optional[str] = None + """ + A string specifying the UUID of the VPC to which the Droplet is assigned. + Requires `vpc:read` scope. + """ diff --git a/src/gradientai/types/shared/droplet_next_backup_window.py b/src/gradientai/types/shared/droplet_next_backup_window.py new file mode 100644 index 00000000..81d07be6 --- /dev/null +++ b/src/gradientai/types/shared/droplet_next_backup_window.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime + +from ..._models import BaseModel + +__all__ = ["DropletNextBackupWindow"] + + +class DropletNextBackupWindow(BaseModel): + end: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format specifying the end + of the Droplet's backup window. + """ + + start: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format specifying the start + of the Droplet's backup window. + """ diff --git a/src/gradientai/types/shared/firewall_rule_target.py b/src/gradientai/types/shared/firewall_rule_target.py new file mode 100644 index 00000000..11f61065 --- /dev/null +++ b/src/gradientai/types/shared/firewall_rule_target.py @@ -0,0 +1,41 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from ..._models import BaseModel + +__all__ = ["FirewallRuleTarget"] + + +class FirewallRuleTarget(BaseModel): + addresses: Optional[List[str]] = None + """ + An array of strings containing the IPv4 addresses, IPv6 addresses, IPv4 CIDRs, + and/or IPv6 CIDRs to which the firewall will allow traffic. + """ + + droplet_ids: Optional[List[int]] = None + """ + An array containing the IDs of the Droplets to which the firewall will allow + traffic. 
+ """ + + kubernetes_ids: Optional[List[str]] = None + """ + An array containing the IDs of the Kubernetes clusters to which the firewall + will allow traffic. + """ + + load_balancer_uids: Optional[List[str]] = None + """ + An array containing the IDs of the load balancers to which the firewall will + allow traffic. + """ + + tags: Optional[List[str]] = None + """A flat array of tag names as strings to be applied to the resource. + + Tag names must exist in order to be referenced in a request. + + Requires `tag:create` and `tag:read` scopes. + """ diff --git a/src/gradientai/types/shared/forward_links.py b/src/gradientai/types/shared/forward_links.py new file mode 100644 index 00000000..30d46985 --- /dev/null +++ b/src/gradientai/types/shared/forward_links.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["ForwardLinks"] + + +class ForwardLinks(BaseModel): + last: Optional[str] = None + """URI of the last page of the results.""" + + next: Optional[str] = None + """URI of the next page of the results.""" diff --git a/src/gradientai/types/shared/garbage_collection.py b/src/gradientai/types/shared/garbage_collection.py new file mode 100644 index 00000000..f1f7f4cd --- /dev/null +++ b/src/gradientai/types/shared/garbage_collection.py @@ -0,0 +1,43 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["GarbageCollection"] + + +class GarbageCollection(BaseModel): + blobs_deleted: Optional[int] = None + """The number of blobs deleted as a result of this garbage collection.""" + + created_at: Optional[datetime] = None + """The time the garbage collection was created.""" + + freed_bytes: Optional[int] = None + """The number of bytes freed as a result of this garbage collection.""" + + registry_name: Optional[str] = None + """The name of the container registry.""" + + status: Optional[ + Literal[ + "requested", + "waiting for write JWTs to expire", + "scanning manifests", + "deleting unreferenced blobs", + "cancelling", + "failed", + "succeeded", + "cancelled", + ] + ] = None + """The current status of this garbage collection.""" + + updated_at: Optional[datetime] = None + """The time the garbage collection was last updated.""" + + uuid: Optional[str] = None + """A string specifying the UUID of the garbage collection.""" diff --git a/src/gradientai/types/shared/gpu_info.py b/src/gradientai/types/shared/gpu_info.py new file mode 100644 index 00000000..a285dd23 --- /dev/null +++ b/src/gradientai/types/shared/gpu_info.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["GPUInfo", "Vram"] + + +class Vram(BaseModel): + amount: Optional[int] = None + """The amount of VRAM allocated to the GPU.""" + + unit: Optional[str] = None + """The unit of measure for the VRAM.""" + + +class GPUInfo(BaseModel): + count: Optional[int] = None + """The number of GPUs allocated to the Droplet.""" + + model: Optional[str] = None + """The model of the GPU.""" + + vram: Optional[Vram] = None diff --git a/src/gradientai/types/shared/image.py b/src/gradientai/types/shared/image.py new file mode 100644 index 00000000..d8a7acde --- /dev/null +++ b/src/gradientai/types/shared/image.py @@ -0,0 +1,131 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["Image"] + + +class Image(BaseModel): + id: Optional[int] = None + """A unique number that can be used to identify and reference a specific image.""" + + created_at: Optional[datetime] = None + """ + A time value given in ISO8601 combined date and time format that represents when + the image was created. + """ + + description: Optional[str] = None + """An optional free-form text field to describe an image.""" + + distribution: Optional[ + Literal[ + "Arch Linux", + "CentOS", + "CoreOS", + "Debian", + "Fedora", + "Fedora Atomic", + "FreeBSD", + "Gentoo", + "openSUSE", + "RancherOS", + "Rocky Linux", + "Ubuntu", + "Unknown", + ] + ] = None + """The name of a custom image's distribution. + + Currently, the valid values are `Arch Linux`, `CentOS`, `CoreOS`, `Debian`, + `Fedora`, `Fedora Atomic`, `FreeBSD`, `Gentoo`, `openSUSE`, `RancherOS`, + `Rocky Linux`, `Ubuntu`, and `Unknown`. Any other value will be accepted but + ignored, and `Unknown` will be used in its place. 
+ """ + + error_message: Optional[str] = None + """ + A string containing information about errors that may occur when importing a + custom image. + """ + + min_disk_size: Optional[int] = None + """The minimum disk size in GB required for a Droplet to use this image.""" + + name: Optional[str] = None + """The display name that has been given to an image. + + This is what is shown in the control panel and is generally a descriptive title + for the image in question. + """ + + public: Optional[bool] = None + """ + This is a boolean value that indicates whether the image in question is public + or not. An image that is public is available to all accounts. A non-public image + is only accessible from your account. + """ + + regions: Optional[ + List[ + Literal[ + "ams1", + "ams2", + "ams3", + "blr1", + "fra1", + "lon1", + "nyc1", + "nyc2", + "nyc3", + "sfo1", + "sfo2", + "sfo3", + "sgp1", + "tor1", + "syd1", + ] + ] + ] = None + """This attribute is an array of the regions that the image is available in. + + The regions are represented by their identifying slug values. + """ + + size_gigabytes: Optional[float] = None + """The size of the image in gigabytes.""" + + slug: Optional[str] = None + """ + A uniquely identifying string that is associated with each of the + DigitalOcean-provided public images. These can be used to reference a public + image as an alternative to the numeric id. + """ + + status: Optional[Literal["NEW", "available", "pending", "deleted", "retired"]] = None + """A status string indicating the state of a custom image. + + This may be `NEW`, `available`, `pending`, `deleted`, or `retired`. + """ + + tags: Optional[List[str]] = None + """A flat array of tag names as strings to be applied to the resource. + + Tag names may be for either existing or new tags. + + Requires `tag:create` scope. + """ + + type: Optional[Literal["base", "snapshot", "backup", "custom", "admin"]] = None + """Describes the kind of image. 
+ + It may be one of `base`, `snapshot`, `backup`, `custom`, or `admin`. + Respectively, this specifies whether an image is a DigitalOcean base OS image, + user-generated Droplet snapshot, automatically created Droplet backup, + user-provided virtual machine image, or an image used for DigitalOcean managed + resources (e.g. DOKS worker nodes). + """ diff --git a/src/gradientai/types/shared/kernel.py b/src/gradientai/types/shared/kernel.py new file mode 100644 index 00000000..78a63427 --- /dev/null +++ b/src/gradientai/types/shared/kernel.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["Kernel"] + + +class Kernel(BaseModel): + id: Optional[int] = None + """A unique number used to identify and reference a specific kernel.""" + + name: Optional[str] = None + """The display name of the kernel. + + This is shown in the web UI and is generally a descriptive title for the kernel + in question. + """ + + version: Optional[str] = None + """ + A standard kernel version string representing the version, patch, and release + information. + """ diff --git a/src/gradientai/types/shared/meta_properties.py b/src/gradientai/types/shared/meta_properties.py new file mode 100644 index 00000000..a78a64d6 --- /dev/null +++ b/src/gradientai/types/shared/meta_properties.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["MetaProperties"] + + +class MetaProperties(BaseModel): + total: Optional[int] = None + """Number of objects returned by the request.""" diff --git a/src/gradientai/types/shared/network_v4.py b/src/gradientai/types/shared/network_v4.py new file mode 100644 index 00000000..bbf8490a --- /dev/null +++ b/src/gradientai/types/shared/network_v4.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["NetworkV4"] + + +class NetworkV4(BaseModel): + gateway: Optional[str] = None + """The gateway of the specified IPv4 network interface. + + For private interfaces, a gateway is not provided. This is denoted by returning + `nil` as its value. + """ + + ip_address: Optional[str] = None + """The IP address of the IPv4 network interface.""" + + netmask: Optional[str] = None + """The netmask of the IPv4 network interface.""" + + type: Optional[Literal["public", "private"]] = None + """The type of the IPv4 network interface.""" diff --git a/src/gradientai/types/shared/network_v6.py b/src/gradientai/types/shared/network_v6.py new file mode 100644 index 00000000..a3eb6b42 --- /dev/null +++ b/src/gradientai/types/shared/network_v6.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["NetworkV6"] + + +class NetworkV6(BaseModel): + gateway: Optional[str] = None + """The gateway of the specified IPv6 network interface.""" + + ip_address: Optional[str] = None + """The IP address of the IPv6 network interface.""" + + netmask: Optional[int] = None + """The netmask of the IPv6 network interface.""" + + type: Optional[Literal["public"]] = None + """The type of the IPv6 network interface. + + **Note**: IPv6 private networking is not currently supported. + """ diff --git a/src/gradientai/types/shared/page_links.py b/src/gradientai/types/shared/page_links.py new file mode 100644 index 00000000..bfceabef --- /dev/null +++ b/src/gradientai/types/shared/page_links.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union, Optional +from typing_extensions import TypeAlias + +from ..._models import BaseModel +from .forward_links import ForwardLinks +from .backward_links import BackwardLinks + +__all__ = ["PageLinks", "Pages"] + +Pages: TypeAlias = Union[ForwardLinks, BackwardLinks, object] + + +class PageLinks(BaseModel): + pages: Optional[Pages] = None diff --git a/src/gradientai/types/shared/region.py b/src/gradientai/types/shared/region.py new file mode 100644 index 00000000..d2fe7c51 --- /dev/null +++ b/src/gradientai/types/shared/region.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List + +from ..._models import BaseModel + +__all__ = ["Region"] + + +class Region(BaseModel): + available: bool + """ + This is a boolean value that represents whether new Droplets can be created in + this region. + """ + + features: List[str] + """ + This attribute is set to an array which contains features available in this + region + """ + + name: str + """The display name of the region. 
+ + This will be a full name that is used in the control panel and other interfaces. + """ + + sizes: List[str] + """ + This attribute is set to an array which contains the identifying slugs for the + sizes available in this region. sizes:read is required to view. + """ + + slug: str + """A human-readable string that is used as a unique identifier for each region.""" diff --git a/src/gradientai/types/shared/size.py b/src/gradientai/types/shared/size.py new file mode 100644 index 00000000..42b0b41f --- /dev/null +++ b/src/gradientai/types/shared/size.py @@ -0,0 +1,79 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional + +from .gpu_info import GPUInfo +from ..._models import BaseModel +from .disk_info import DiskInfo + +__all__ = ["Size"] + + +class Size(BaseModel): + available: bool + """ + This is a boolean value that represents whether new Droplets can be created with + this size. + """ + + description: str + """A string describing the class of Droplets created from this size. + + For example: Basic, General Purpose, CPU-Optimized, Memory-Optimized, or + Storage-Optimized. + """ + + disk: int + """The amount of disk space set aside for Droplets of this size. + + The value is represented in gigabytes. + """ + + memory: int + """The amount of RAM allocated to Droplets created of this size. + + The value is represented in megabytes. + """ + + price_hourly: float + """This describes the price of the Droplet size as measured hourly. + + The value is measured in US dollars. + """ + + price_monthly: float + """ + This attribute describes the monthly cost of this Droplet size if the Droplet is + kept for an entire month. The value is measured in US dollars. + """ + + regions: List[str] + """ + An array containing the region slugs where this size is available for Droplet + creates. regions:read is required to view. 
+ """ + + slug: str + """A human-readable string that is used to uniquely identify each size.""" + + transfer: float + """ + The amount of transfer bandwidth that is available for Droplets created in this + size. This only counts traffic on the public interface. The value is given in + terabytes. + """ + + vcpus: int + """The number of CPUs allocated to Droplets of this size.""" + + disk_info: Optional[List[DiskInfo]] = None + """ + An array of objects containing information about the disks available to Droplets + created with this size. + """ + + gpu_info: Optional[GPUInfo] = None + """ + An object containing information about the GPU capabilities of Droplets created + with this size. + """ diff --git a/src/gradientai/types/shared/snapshots.py b/src/gradientai/types/shared/snapshots.py new file mode 100644 index 00000000..940b58c8 --- /dev/null +++ b/src/gradientai/types/shared/snapshots.py @@ -0,0 +1,47 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["Snapshots"] + + +class Snapshots(BaseModel): + id: str + """The unique identifier for the snapshot.""" + + created_at: datetime + """ + A time value given in ISO8601 combined date and time format that represents when + the snapshot was created. + """ + + min_disk_size: int + """The minimum size in GB required for a volume or Droplet to use this snapshot.""" + + name: str + """A human-readable name for the snapshot.""" + + regions: List[str] + """An array of the regions that the snapshot is available in. + + The regions are represented by their identifying slug values. 
+ """ + + resource_id: str + """The unique identifier for the resource that the snapshot originated from.""" + + resource_type: Literal["droplet", "volume"] + """The type of resource that the snapshot originated from.""" + + size_gigabytes: float + """The billable size of the snapshot in gigabytes.""" + + tags: Optional[List[str]] = None + """An array of Tags the snapshot has been tagged with. + + Requires `tag:read` scope. + """ diff --git a/src/gradientai/types/shared/subscription.py b/src/gradientai/types/shared/subscription.py new file mode 100644 index 00000000..4d77a9b8 --- /dev/null +++ b/src/gradientai/types/shared/subscription.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from datetime import datetime + +from ..._models import BaseModel +from .subscription_tier_base import SubscriptionTierBase + +__all__ = ["Subscription"] + + +class Subscription(BaseModel): + created_at: Optional[datetime] = None + """The time at which the subscription was created.""" + + tier: Optional[SubscriptionTierBase] = None + + updated_at: Optional[datetime] = None + """The time at which the subscription was last updated.""" diff --git a/src/gradientai/types/shared/subscription_tier_base.py b/src/gradientai/types/shared/subscription_tier_base.py new file mode 100644 index 00000000..65e1a316 --- /dev/null +++ b/src/gradientai/types/shared/subscription_tier_base.py @@ -0,0 +1,44 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel + +__all__ = ["SubscriptionTierBase"] + + +class SubscriptionTierBase(BaseModel): + allow_storage_overage: Optional[bool] = None + """ + A boolean indicating whether the subscription tier supports additional storage + above what is included in the base plan at an additional cost per GiB used. 
+ """ + + included_bandwidth_bytes: Optional[int] = None + """ + The amount of outbound data transfer included in the subscription tier in bytes. + """ + + included_repositories: Optional[int] = None + """The number of repositories included in the subscription tier. + + `0` indicates that the subscription tier includes unlimited repositories. + """ + + included_storage_bytes: Optional[int] = None + """The amount of storage included in the subscription tier in bytes.""" + + monthly_price_in_cents: Optional[int] = None + """The monthly cost of the subscription tier in cents.""" + + name: Optional[str] = None + """The name of the subscription tier.""" + + slug: Optional[str] = None + """The slug identifier of the subscription tier.""" + + storage_overage_price_in_cents: Optional[int] = None + """ + The price paid in cents per GiB for additional storage beyond what is included + in the subscription plan. + """ diff --git a/src/gradientai/types/shared/vpc_peering.py b/src/gradientai/types/shared/vpc_peering.py new file mode 100644 index 00000000..ef674e23 --- /dev/null +++ b/src/gradientai/types/shared/vpc_peering.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Optional +from datetime import datetime +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["VpcPeering"] + + +class VpcPeering(BaseModel): + id: Optional[str] = None + """A unique ID that can be used to identify and reference the VPC peering.""" + + created_at: Optional[datetime] = None + """A time value given in ISO8601 combined date and time format.""" + + name: Optional[str] = None + """The name of the VPC peering. + + Must be unique within the team and may only contain alphanumeric characters and + dashes. 
+ """ + + status: Optional[Literal["PROVISIONING", "ACTIVE", "DELETING"]] = None + """The current status of the VPC peering.""" + + vpc_ids: Optional[List[str]] = None + """An array of the two peered VPCs IDs.""" diff --git a/src/gradientai/types/shared_params/__init__.py b/src/gradientai/types/shared_params/__init__.py new file mode 100644 index 00000000..ccdec8fd --- /dev/null +++ b/src/gradientai/types/shared_params/__init__.py @@ -0,0 +1,3 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .firewall_rule_target import FirewallRuleTarget as FirewallRuleTarget diff --git a/src/gradientai/types/shared_params/firewall_rule_target.py b/src/gradientai/types/shared_params/firewall_rule_target.py new file mode 100644 index 00000000..49a5f75c --- /dev/null +++ b/src/gradientai/types/shared_params/firewall_rule_target.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Iterable, Optional +from typing_extensions import TypedDict + +__all__ = ["FirewallRuleTarget"] + + +class FirewallRuleTarget(TypedDict, total=False): + addresses: List[str] + """ + An array of strings containing the IPv4 addresses, IPv6 addresses, IPv4 CIDRs, + and/or IPv6 CIDRs to which the firewall will allow traffic. + """ + + droplet_ids: Iterable[int] + """ + An array containing the IDs of the Droplets to which the firewall will allow + traffic. + """ + + kubernetes_ids: List[str] + """ + An array containing the IDs of the Kubernetes clusters to which the firewall + will allow traffic. + """ + + load_balancer_uids: List[str] + """ + An array containing the IDs of the load balancers to which the firewall will + allow traffic. + """ + + tags: Optional[List[str]] + """A flat array of tag names as strings to be applied to the resource. + + Tag names must exist in order to be referenced in a request. 
+ + Requires `tag:create` and `tag:read` scopes. + """ diff --git a/tests/api_resources/agents/chat/test_completions.py b/tests/api_resources/agents/chat/test_completions.py index 48707a55..6533a423 100644 --- a/tests/api_resources/agents/chat/test_completions.py +++ b/tests/api_resources/agents/chat/test_completions.py @@ -56,6 +56,17 @@ def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> N stream=False, stream_options={"include_usage": True}, temperature=1, + tool_choice="none", + tools=[ + { + "function": { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + }, + "type": "function", + } + ], top_logprobs=0, top_p=1, user="user-1234", @@ -138,6 +149,17 @@ def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> N stop="\n", stream_options={"include_usage": True}, temperature=1, + tool_choice="none", + tools=[ + { + "function": { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + }, + "type": "function", + } + ], top_logprobs=0, top_p=1, user="user-1234", @@ -233,6 +255,17 @@ async def test_method_create_with_all_params_overload_1( stream=False, stream_options={"include_usage": True}, temperature=1, + tool_choice="none", + tools=[ + { + "function": { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + }, + "type": "function", + } + ], top_logprobs=0, top_p=1, user="user-1234", @@ -323,6 +356,17 @@ async def test_method_create_with_all_params_overload_2( stop="\n", stream_options={"include_usage": True}, temperature=1, + tool_choice="none", + tools=[ + { + "function": { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + }, + "type": "function", + } + ], top_logprobs=0, top_p=1, user="user-1234", diff --git a/tests/api_resources/agents/evaluation_metrics/test_models.py b/tests/api_resources/agents/evaluation_metrics/test_models.py new file mode 100644 index 00000000..6b8f8bc7 --- /dev/null 
+++ b/tests/api_resources/agents/evaluation_metrics/test_models.py @@ -0,0 +1,102 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.agents.evaluation_metrics import ModelListResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestModels: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + model = client.agents.evaluation_metrics.models.list() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + model = client.agents.evaluation_metrics.models.list( + page=0, + per_page=0, + public_only=True, + usecases=["MODEL_USECASE_UNKNOWN"], + ) + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.models.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.models.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + assert 
cast(Any, response.is_closed) is True + + +class TestAsyncModels: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + model = await async_client.agents.evaluation_metrics.models.list() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + model = await async_client.agents.evaluation_metrics.models.list( + page=0, + per_page=0, + public_only=True, + usecases=["MODEL_USECASE_UNKNOWN"], + ) + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.models.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = await response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.models.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = await response.parse() + assert_matches_type(ModelListResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py index afeaa8f1..ea39c474 100644 --- a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py +++ 
b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py @@ -34,9 +34,9 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: workspace = client.agents.evaluation_metrics.workspaces.create( - agent_uuids=["string"], - description="description", - name="name", + agent_uuids=["example string"], + description='"example string"', + name='"example name"', ) assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) @@ -108,7 +108,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: workspace = client.agents.evaluation_metrics.workspaces.update( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) @@ -116,10 +116,10 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: workspace = client.agents.evaluation_metrics.workspaces.update( - path_workspace_uuid="workspace_uuid", - description="description", - name="name", - body_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + description='"example string"', + name='"example name"', + body_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) @@ -127,7 +127,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.evaluation_metrics.workspaces.with_raw_response.update( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -139,7 +139,7 @@ def 
test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.evaluation_metrics.workspaces.with_streaming_response.update( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -285,9 +285,9 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.create( - agent_uuids=["string"], - description="description", - name="name", + agent_uuids=["example string"], + description='"example string"', + name='"example name"', ) assert_matches_type(WorkspaceCreateResponse, workspace, path=["response"]) @@ -359,7 +359,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.update( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) @@ -367,10 +367,10 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: workspace = await async_client.agents.evaluation_metrics.workspaces.update( - path_workspace_uuid="workspace_uuid", - description="description", - name="name", - body_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + description='"example string"', + name='"example name"', + body_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) 
assert_matches_type(WorkspaceUpdateResponse, workspace, path=["response"]) @@ -378,7 +378,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_metrics.workspaces.with_raw_response.update( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -390,7 +390,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_metrics.workspaces.with_streaming_response.update( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py index 764e13e0..635721b3 100644 --- a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py +++ b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py @@ -24,7 +24,7 @@ class TestAgents: @parametrize def test_method_list(self, client: GradientAI) -> None: agent = client.agents.evaluation_metrics.workspaces.agents.list( - workspace_uuid="workspace_uuid", + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentListResponse, agent, path=["response"]) @@ -32,8 +32,7 @@ def test_method_list(self, client: GradientAI) -> None: @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: agent = client.agents.evaluation_metrics.workspaces.agents.list( - workspace_uuid="workspace_uuid", - field_mask={"paths": ["string"]}, + 
workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', only_deployed=True, page=0, per_page=0, @@ -44,7 +43,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_list(self, client: GradientAI) -> None: response = client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list( - workspace_uuid="workspace_uuid", + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -56,7 +55,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: with client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.list( - workspace_uuid="workspace_uuid", + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -78,7 +77,7 @@ def test_path_params_list(self, client: GradientAI) -> None: @parametrize def test_method_move(self, client: GradientAI) -> None: agent = client.agents.evaluation_metrics.workspaces.agents.move( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentMoveResponse, agent, path=["response"]) @@ -86,9 +85,9 @@ def test_method_move(self, client: GradientAI) -> None: @parametrize def test_method_move_with_all_params(self, client: GradientAI) -> None: agent = client.agents.evaluation_metrics.workspaces.agents.move( - path_workspace_uuid="workspace_uuid", - agent_uuids=["string"], - body_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuids=["example string"], + body_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentMoveResponse, agent, path=["response"]) @@ -96,7 +95,7 @@ def test_method_move_with_all_params(self, client: GradientAI) -> None: @parametrize def 
test_raw_response_move(self, client: GradientAI) -> None: response = client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -108,7 +107,7 @@ def test_raw_response_move(self, client: GradientAI) -> None: @parametrize def test_streaming_response_move(self, client: GradientAI) -> None: with client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.move( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -136,7 +135,7 @@ class TestAsyncAgents: @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.evaluation_metrics.workspaces.agents.list( - workspace_uuid="workspace_uuid", + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentListResponse, agent, path=["response"]) @@ -144,8 +143,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.evaluation_metrics.workspaces.agents.list( - workspace_uuid="workspace_uuid", - field_mask={"paths": ["string"]}, + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', only_deployed=True, page=0, per_page=0, @@ -156,7 +154,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.list( - workspace_uuid="workspace_uuid", + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -168,7 
+166,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.list( - workspace_uuid="workspace_uuid", + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -190,7 +188,7 @@ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_move(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.evaluation_metrics.workspaces.agents.move( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentMoveResponse, agent, path=["response"]) @@ -198,9 +196,9 @@ async def test_method_move(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_move_with_all_params(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.evaluation_metrics.workspaces.agents.move( - path_workspace_uuid="workspace_uuid", - agent_uuids=["string"], - body_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuids=["example string"], + body_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentMoveResponse, agent, path=["response"]) @@ -208,7 +206,7 @@ async def test_method_move_with_all_params(self, async_client: AsyncGradientAI) @parametrize async def test_raw_response_move(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_metrics.workspaces.agents.with_raw_response.move( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -220,7 +218,7 @@ async def 
test_raw_response_move(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_move(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_metrics.workspaces.agents.with_streaming_response.move( - path_workspace_uuid="workspace_uuid", + path_workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py index beb9666a..c29511f5 100644 --- a/tests/api_resources/agents/test_api_keys.py +++ b/tests/api_resources/agents/test_api_keys.py @@ -27,7 +27,7 @@ class TestAPIKeys: @parametrize def test_method_create(self, client: GradientAI) -> None: api_key = client.agents.api_keys.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -35,9 +35,9 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: api_key = client.agents.api_keys.create( - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - name="name", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_agent_uuid='"12345678-1234-1234-1234-123456789012"', + name="Production Key", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -45,7 +45,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_create(self, client: GradientAI) -> None: response = client.agents.api_keys.with_raw_response.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -57,7 +57,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @parametrize def 
test_streaming_response_create(self, client: GradientAI) -> None: with client.agents.api_keys.with_streaming_response.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -79,8 +79,8 @@ def test_path_params_create(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: api_key = client.agents.api_keys.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -88,11 +88,11 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: api_key = client.agents.api_keys.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - body_api_key_uuid="api_key_uuid", - name="name", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_agent_uuid='"12345678-1234-1234-1234-123456789012"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -100,8 +100,8 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -113,8 +113,8 @@ def 
test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -129,21 +129,21 @@ def test_streaming_response_update(self, client: GradientAI) -> None: def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): client.agents.api_keys.with_raw_response.update( path_api_key_uuid="", - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize def test_method_list(self, client: GradientAI) -> None: api_key = client.agents.api_keys.list( - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @@ -151,7 +151,7 @@ def test_method_list(self, client: GradientAI) -> None: @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: api_key = client.agents.api_keys.list( - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -161,7 +161,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_list(self, client: GradientAI) -> None: response = 
client.agents.api_keys.with_raw_response.list( - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -173,7 +173,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: with client.agents.api_keys.with_streaming_response.list( - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -195,8 +195,8 @@ def test_path_params_list(self, client: GradientAI) -> None: @parametrize def test_method_delete(self, client: GradientAI) -> None: api_key = client.agents.api_keys.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) @@ -204,8 +204,8 @@ def test_method_delete(self, client: GradientAI) -> None: @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: response = client.agents.api_keys.with_raw_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -217,8 +217,8 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: with client.agents.api_keys.with_streaming_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -233,22 +233,22 @@ def 
test_streaming_response_delete(self, client: GradientAI) -> None: def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): client.agents.api_keys.with_raw_response.delete( - api_key_uuid="api_key_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): client.agents.api_keys.with_raw_response.delete( api_key_uuid="", - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize def test_method_regenerate(self, client: GradientAI) -> None: api_key = client.agents.api_keys.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) @@ -256,8 +256,8 @@ def test_method_regenerate(self, client: GradientAI) -> None: @parametrize def test_raw_response_regenerate(self, client: GradientAI) -> None: response = client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -269,8 +269,8 @@ def test_raw_response_regenerate(self, client: GradientAI) -> None: @parametrize def test_streaming_response_regenerate(self, client: GradientAI) -> None: with client.agents.api_keys.with_streaming_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -285,14 
+285,14 @@ def test_streaming_response_regenerate(self, client: GradientAI) -> None: def test_path_params_regenerate(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="api_key_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="", - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -305,7 +305,7 @@ class TestAsyncAPIKeys: @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -313,9 +313,9 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.create( - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - name="name", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_agent_uuid='"12345678-1234-1234-1234-123456789012"', + name="Production Key", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -323,7 +323,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.api_keys.with_raw_response.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -335,7 +335,7 @@ async def 
test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.api_keys.with_streaming_response.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -357,8 +357,8 @@ async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -366,11 +366,11 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - body_api_key_uuid="api_key_uuid", - name="name", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_agent_uuid='"12345678-1234-1234-1234-123456789012"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -378,8 +378,8 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.api_keys.with_raw_response.update( - 
path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -391,8 +391,8 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", - path_agent_uuid="agent_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -407,21 +407,21 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_api_key_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.update( path_api_key_uuid="", - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.list( - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyListResponse, api_key, path=["response"]) @@ -429,7 +429,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def 
test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.list( - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -439,7 +439,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.api_keys.with_raw_response.list( - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -451,7 +451,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.api_keys.with_streaming_response.list( - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -473,8 +473,8 @@ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyDeleteResponse, api_key, path=["response"]) @@ -482,8 +482,8 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.api_keys.with_raw_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) 
assert response.is_closed is True @@ -495,8 +495,8 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.api_keys.with_streaming_response.delete( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -511,22 +511,22 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.delete( - api_key_uuid="api_key_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.delete( api_key_uuid="", - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize async def test_method_regenerate(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.agents.api_keys.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyRegenerateResponse, api_key, path=["response"]) @@ -534,8 +534,8 @@ async def test_method_regenerate(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.api_keys.with_raw_response.regenerate( 
- api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -547,8 +547,8 @@ async def test_raw_response_regenerate(self, async_client: AsyncGradientAI) -> N @parametrize async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.api_keys.with_streaming_response.regenerate( - api_key_uuid="api_key_uuid", - agent_uuid="agent_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -563,12 +563,12 @@ async def test_streaming_response_regenerate(self, async_client: AsyncGradientAI async def test_path_params_regenerate(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.regenerate( - api_key_uuid="api_key_uuid", + api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `api_key_uuid` but received ''"): await async_client.agents.api_keys.with_raw_response.regenerate( api_key_uuid="", - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) diff --git a/tests/api_resources/agents/test_evaluation_datasets.py b/tests/api_resources/agents/test_evaluation_datasets.py index e6ca2644..0413591e 100644 --- a/tests/api_resources/agents/test_evaluation_datasets.py +++ b/tests/api_resources/agents/test_evaluation_datasets.py @@ -31,11 +31,11 @@ def test_method_create(self, client: GradientAI) -> None: def test_method_create_with_all_params(self, client: GradientAI) -> None: evaluation_dataset = client.agents.evaluation_datasets.create( 
file_upload_dataset={ - "original_file_name": "original_file_name", - "size_in_bytes": "size_in_bytes", - "stored_object_key": "stored_object_key", + "original_file_name": '"example name"', + "size_in_bytes": '"12345"', + "stored_object_key": '"example string"', }, - name="name", + name='"example name"', ) assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) @@ -75,7 +75,7 @@ def test_method_create_file_upload_presigned_urls_with_all_params(self, client: evaluation_dataset = client.agents.evaluation_datasets.create_file_upload_presigned_urls( files=[ { - "file_name": "file_name", + "file_name": '"example name"', "file_size": "file_size", } ], @@ -127,11 +127,11 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: evaluation_dataset = await async_client.agents.evaluation_datasets.create( file_upload_dataset={ - "original_file_name": "original_file_name", - "size_in_bytes": "size_in_bytes", - "stored_object_key": "stored_object_key", + "original_file_name": '"example name"', + "size_in_bytes": '"12345"', + "stored_object_key": '"example string"', }, - name="name", + name='"example name"', ) assert_matches_type(EvaluationDatasetCreateResponse, evaluation_dataset, path=["response"]) @@ -173,7 +173,7 @@ async def test_method_create_file_upload_presigned_urls_with_all_params( evaluation_dataset = await async_client.agents.evaluation_datasets.create_file_upload_presigned_urls( files=[ { - "file_name": "file_name", + "file_name": '"example name"', "file_size": "file_size", } ], diff --git a/tests/api_resources/agents/test_evaluation_metrics.py b/tests/api_resources/agents/test_evaluation_metrics.py index be83e330..d64367ae 100644 --- a/tests/api_resources/agents/test_evaluation_metrics.py +++ b/tests/api_resources/agents/test_evaluation_metrics.py @@ -9,7 +9,10 @@ from gradientai import GradientAI, AsyncGradientAI from 
tests.utils import assert_matches_type -from gradientai.types.agents import EvaluationMetricListResponse +from gradientai.types.agents import ( + EvaluationMetricListResponse, + EvaluationMetricListRegionsResponse, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -45,6 +48,43 @@ def test_streaming_response_list(self, client: GradientAI) -> None: assert cast(Any, response.is_closed) is True + @pytest.mark.skip() + @parametrize + def test_method_list_regions(self, client: GradientAI) -> None: + evaluation_metric = client.agents.evaluation_metrics.list_regions() + assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_regions_with_all_params(self, client: GradientAI) -> None: + evaluation_metric = client.agents.evaluation_metrics.list_regions( + serves_batch=True, + serves_inference=True, + ) + assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_regions(self, client: GradientAI) -> None: + response = client.agents.evaluation_metrics.with_raw_response.list_regions() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_metric = response.parse() + assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_regions(self, client: GradientAI) -> None: + with client.agents.evaluation_metrics.with_streaming_response.list_regions() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_metric = response.parse() + assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) + + assert cast(Any, response.is_closed) is True + class 
TestAsyncEvaluationMetrics: parametrize = pytest.mark.parametrize( @@ -78,3 +118,40 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N assert_matches_type(EvaluationMetricListResponse, evaluation_metric, path=["response"]) assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list_regions(self, async_client: AsyncGradientAI) -> None: + evaluation_metric = await async_client.agents.evaluation_metrics.list_regions() + assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_regions_with_all_params(self, async_client: AsyncGradientAI) -> None: + evaluation_metric = await async_client.agents.evaluation_metrics.list_regions( + serves_batch=True, + serves_inference=True, + ) + assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_regions(self, async_client: AsyncGradientAI) -> None: + response = await async_client.agents.evaluation_metrics.with_raw_response.list_regions() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + evaluation_metric = await response.parse() + assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_regions(self, async_client: AsyncGradientAI) -> None: + async with async_client.agents.evaluation_metrics.with_streaming_response.list_regions() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + evaluation_metric = await response.parse() + assert_matches_type(EvaluationMetricListRegionsResponse, evaluation_metric, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git 
a/tests/api_resources/agents/test_evaluation_runs.py b/tests/api_resources/agents/test_evaluation_runs.py index b2fce320..2ea44e6b 100644 --- a/tests/api_resources/agents/test_evaluation_runs.py +++ b/tests/api_resources/agents/test_evaluation_runs.py @@ -32,9 +32,9 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: evaluation_run = client.agents.evaluation_runs.create( - agent_uuids=["string"], - run_name="run_name", - test_case_uuid="test_case_uuid", + agent_uuids=["example string"], + run_name="Evaluation Run Name", + test_case_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) @@ -106,7 +106,17 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_list_results(self, client: GradientAI) -> None: evaluation_run = client.agents.evaluation_runs.list_results( - "evaluation_run_uuid", + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_results_with_all_params(self, client: GradientAI) -> None: + evaluation_run = client.agents.evaluation_runs.list_results( + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', + page=0, + per_page=0, ) assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"]) @@ -114,7 +124,7 @@ def test_method_list_results(self, client: GradientAI) -> None: @parametrize def test_raw_response_list_results(self, client: GradientAI) -> None: response = client.agents.evaluation_runs.with_raw_response.list_results( - "evaluation_run_uuid", + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -126,7 +136,7 @@ def test_raw_response_list_results(self, client: GradientAI) -> None: 
@parametrize def test_streaming_response_list_results(self, client: GradientAI) -> None: with client.agents.evaluation_runs.with_streaming_response.list_results( - "evaluation_run_uuid", + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -141,15 +151,15 @@ def test_streaming_response_list_results(self, client: GradientAI) -> None: def test_path_params_list_results(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): client.agents.evaluation_runs.with_raw_response.list_results( - "", + evaluation_run_uuid="", ) @pytest.mark.skip() @parametrize def test_method_retrieve_results(self, client: GradientAI) -> None: evaluation_run = client.agents.evaluation_runs.retrieve_results( - prompt_id=0, - evaluation_run_uuid="evaluation_run_uuid", + prompt_id=1, + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"]) @@ -157,8 +167,8 @@ def test_method_retrieve_results(self, client: GradientAI) -> None: @parametrize def test_raw_response_retrieve_results(self, client: GradientAI) -> None: response = client.agents.evaluation_runs.with_raw_response.retrieve_results( - prompt_id=0, - evaluation_run_uuid="evaluation_run_uuid", + prompt_id=1, + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -170,8 +180,8 @@ def test_raw_response_retrieve_results(self, client: GradientAI) -> None: @parametrize def test_streaming_response_retrieve_results(self, client: GradientAI) -> None: with client.agents.evaluation_runs.with_streaming_response.retrieve_results( - prompt_id=0, - evaluation_run_uuid="evaluation_run_uuid", + prompt_id=1, + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not 
response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -186,7 +196,7 @@ def test_streaming_response_retrieve_results(self, client: GradientAI) -> None: def test_path_params_retrieve_results(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): client.agents.evaluation_runs.with_raw_response.retrieve_results( - prompt_id=0, + prompt_id=1, evaluation_run_uuid="", ) @@ -206,9 +216,9 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: evaluation_run = await async_client.agents.evaluation_runs.create( - agent_uuids=["string"], - run_name="run_name", - test_case_uuid="test_case_uuid", + agent_uuids=["example string"], + run_name="Evaluation Run Name", + test_case_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(EvaluationRunCreateResponse, evaluation_run, path=["response"]) @@ -280,7 +290,17 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_list_results(self, async_client: AsyncGradientAI) -> None: evaluation_run = await async_client.agents.evaluation_runs.list_results( - "evaluation_run_uuid", + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', + ) + assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_results_with_all_params(self, async_client: AsyncGradientAI) -> None: + evaluation_run = await async_client.agents.evaluation_runs.list_results( + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', + page=0, + per_page=0, ) assert_matches_type(EvaluationRunListResultsResponse, evaluation_run, path=["response"]) @@ -288,7 +308,7 @@ async def test_method_list_results(self, async_client: AsyncGradientAI) 
-> None: @parametrize async def test_raw_response_list_results(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_runs.with_raw_response.list_results( - "evaluation_run_uuid", + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -300,7 +320,7 @@ async def test_raw_response_list_results(self, async_client: AsyncGradientAI) -> @parametrize async def test_streaming_response_list_results(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_runs.with_streaming_response.list_results( - "evaluation_run_uuid", + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -315,15 +335,15 @@ async def test_streaming_response_list_results(self, async_client: AsyncGradient async def test_path_params_list_results(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): await async_client.agents.evaluation_runs.with_raw_response.list_results( - "", + evaluation_run_uuid="", ) @pytest.mark.skip() @parametrize async def test_method_retrieve_results(self, async_client: AsyncGradientAI) -> None: evaluation_run = await async_client.agents.evaluation_runs.retrieve_results( - prompt_id=0, - evaluation_run_uuid="evaluation_run_uuid", + prompt_id=1, + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationRunRetrieveResultsResponse, evaluation_run, path=["response"]) @@ -331,8 +351,8 @@ async def test_method_retrieve_results(self, async_client: AsyncGradientAI) -> N @parametrize async def test_raw_response_retrieve_results(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_runs.with_raw_response.retrieve_results( - prompt_id=0, - 
evaluation_run_uuid="evaluation_run_uuid", + prompt_id=1, + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -344,8 +364,8 @@ async def test_raw_response_retrieve_results(self, async_client: AsyncGradientAI @parametrize async def test_streaming_response_retrieve_results(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_runs.with_streaming_response.retrieve_results( - prompt_id=0, - evaluation_run_uuid="evaluation_run_uuid", + prompt_id=1, + evaluation_run_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -360,6 +380,6 @@ async def test_streaming_response_retrieve_results(self, async_client: AsyncGrad async def test_path_params_retrieve_results(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `evaluation_run_uuid` but received ''"): await async_client.agents.evaluation_runs.with_raw_response.retrieve_results( - prompt_id=0, + prompt_id=1, evaluation_run_uuid="", ) diff --git a/tests/api_resources/agents/test_evaluation_test_cases.py b/tests/api_resources/agents/test_evaluation_test_cases.py index a0b5ee77..e9083ba3 100644 --- a/tests/api_resources/agents/test_evaluation_test_cases.py +++ b/tests/api_resources/agents/test_evaluation_test_cases.py @@ -33,16 +33,17 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: evaluation_test_case = client.agents.evaluation_test_cases.create( - dataset_uuid="dataset_uuid", - description="description", - metrics=["string"], - name="name", + dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"', + description='"example string"', + metrics=["example string"], + name='"example name"', star_metric={ - "metric_uuid": "metric_uuid", - "name": "name", - 
"success_threshold_pct": 0, + "metric_uuid": '"123e4567-e89b-12d3-a456-426614174000"', + "name": '"example name"', + "success_threshold": 123, + "success_threshold_pct": 123, }, - workspace_uuid="workspace_uuid", + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) @@ -72,7 +73,7 @@ def test_streaming_response_create(self, client: GradientAI) -> None: @parametrize def test_method_retrieve(self, client: GradientAI) -> None: evaluation_test_case = client.agents.evaluation_test_cases.retrieve( - test_case_uuid="test_case_uuid", + test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) @@ -80,7 +81,7 @@ def test_method_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_retrieve_with_all_params(self, client: GradientAI) -> None: evaluation_test_case = client.agents.evaluation_test_cases.retrieve( - test_case_uuid="test_case_uuid", + test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', evaluation_test_case_version=0, ) assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) @@ -89,7 +90,7 @@ def test_method_retrieve_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_retrieve(self, client: GradientAI) -> None: response = client.agents.evaluation_test_cases.with_raw_response.retrieve( - test_case_uuid="test_case_uuid", + test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -101,7 +102,7 @@ def test_raw_response_retrieve(self, client: GradientAI) -> None: @parametrize def test_streaming_response_retrieve(self, client: GradientAI) -> None: with client.agents.evaluation_test_cases.with_streaming_response.retrieve( - test_case_uuid="test_case_uuid", + test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not 
response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -123,7 +124,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: evaluation_test_case = client.agents.evaluation_test_cases.update( - path_test_case_uuid="test_case_uuid", + path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) @@ -131,17 +132,18 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: evaluation_test_case = client.agents.evaluation_test_cases.update( - path_test_case_uuid="test_case_uuid", - dataset_uuid="dataset_uuid", - description="description", - metrics={"metric_uuids": ["string"]}, - name="name", + path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"', + description='"example string"', + metrics={"metric_uuids": ["example string"]}, + name='"example name"', star_metric={ - "metric_uuid": "metric_uuid", - "name": "name", - "success_threshold_pct": 0, + "metric_uuid": '"123e4567-e89b-12d3-a456-426614174000"', + "name": '"example name"', + "success_threshold": 123, + "success_threshold_pct": 123, }, - body_test_case_uuid="test_case_uuid", + body_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) @@ -149,7 +151,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.evaluation_test_cases.with_raw_response.update( - path_test_case_uuid="test_case_uuid", + path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -161,7 +163,7 @@ def 
test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.evaluation_test_cases.with_streaming_response.update( - path_test_case_uuid="test_case_uuid", + path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -211,7 +213,7 @@ def test_streaming_response_list(self, client: GradientAI) -> None: @parametrize def test_method_list_evaluation_runs(self, client: GradientAI) -> None: evaluation_test_case = client.agents.evaluation_test_cases.list_evaluation_runs( - evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) @@ -219,7 +221,7 @@ def test_method_list_evaluation_runs(self, client: GradientAI) -> None: @parametrize def test_method_list_evaluation_runs_with_all_params(self, client: GradientAI) -> None: evaluation_test_case = client.agents.evaluation_test_cases.list_evaluation_runs( - evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', evaluation_test_case_version=0, ) assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) @@ -228,7 +230,7 @@ def test_method_list_evaluation_runs_with_all_params(self, client: GradientAI) - @parametrize def test_raw_response_list_evaluation_runs(self, client: GradientAI) -> None: response = client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs( - evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -240,7 +242,7 @@ def test_raw_response_list_evaluation_runs(self, client: 
GradientAI) -> None: @parametrize def test_streaming_response_list_evaluation_runs(self, client: GradientAI) -> None: with client.agents.evaluation_test_cases.with_streaming_response.list_evaluation_runs( - evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -276,16 +278,17 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.create( - dataset_uuid="dataset_uuid", - description="description", - metrics=["string"], - name="name", + dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"', + description='"example string"', + metrics=["example string"], + name='"example name"', star_metric={ - "metric_uuid": "metric_uuid", - "name": "name", - "success_threshold_pct": 0, + "metric_uuid": '"123e4567-e89b-12d3-a456-426614174000"', + "name": '"example name"', + "success_threshold": 123, + "success_threshold_pct": 123, }, - workspace_uuid="workspace_uuid", + workspace_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationTestCaseCreateResponse, evaluation_test_case, path=["response"]) @@ -315,7 +318,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @parametrize async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve( - test_case_uuid="test_case_uuid", + test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) @@ -323,7 +326,7 @@ async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: @parametrize async def 
test_method_retrieve_with_all_params(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.retrieve( - test_case_uuid="test_case_uuid", + test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', evaluation_test_case_version=0, ) assert_matches_type(EvaluationTestCaseRetrieveResponse, evaluation_test_case, path=["response"]) @@ -332,7 +335,7 @@ async def test_method_retrieve_with_all_params(self, async_client: AsyncGradient @parametrize async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_test_cases.with_raw_response.retrieve( - test_case_uuid="test_case_uuid", + test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -344,7 +347,7 @@ async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> Non @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_test_cases.with_streaming_response.retrieve( - test_case_uuid="test_case_uuid", + test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -366,7 +369,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.update( - path_test_case_uuid="test_case_uuid", + path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) @@ -374,17 +377,18 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = 
await async_client.agents.evaluation_test_cases.update( - path_test_case_uuid="test_case_uuid", - dataset_uuid="dataset_uuid", - description="description", - metrics={"metric_uuids": ["string"]}, - name="name", + path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', + dataset_uuid='"123e4567-e89b-12d3-a456-426614174000"', + description='"example string"', + metrics={"metric_uuids": ["example string"]}, + name='"example name"', star_metric={ - "metric_uuid": "metric_uuid", - "name": "name", - "success_threshold_pct": 0, + "metric_uuid": '"123e4567-e89b-12d3-a456-426614174000"', + "name": '"example name"', + "success_threshold": 123, + "success_threshold_pct": 123, }, - body_test_case_uuid="test_case_uuid", + body_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationTestCaseUpdateResponse, evaluation_test_case, path=["response"]) @@ -392,7 +396,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_test_cases.with_raw_response.update( - path_test_case_uuid="test_case_uuid", + path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -404,7 +408,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_test_cases.with_streaming_response.update( - path_test_case_uuid="test_case_uuid", + path_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -454,7 +458,7 @@ async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> N @parametrize async def test_method_list_evaluation_runs(self, async_client: 
AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.list_evaluation_runs( - evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) @@ -462,7 +466,7 @@ async def test_method_list_evaluation_runs(self, async_client: AsyncGradientAI) @parametrize async def test_method_list_evaluation_runs_with_all_params(self, async_client: AsyncGradientAI) -> None: evaluation_test_case = await async_client.agents.evaluation_test_cases.list_evaluation_runs( - evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', evaluation_test_case_version=0, ) assert_matches_type(EvaluationTestCaseListEvaluationRunsResponse, evaluation_test_case, path=["response"]) @@ -471,7 +475,7 @@ async def test_method_list_evaluation_runs_with_all_params(self, async_client: A @parametrize async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.evaluation_test_cases.with_raw_response.list_evaluation_runs( - evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -483,7 +487,7 @@ async def test_raw_response_list_evaluation_runs(self, async_client: AsyncGradie @parametrize async def test_streaming_response_list_evaluation_runs(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.evaluation_test_cases.with_streaming_response.list_evaluation_runs( - evaluation_test_case_uuid="evaluation_test_case_uuid", + evaluation_test_case_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git 
a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py index 5a3693cb..4390d1d2 100644 --- a/tests/api_resources/agents/test_functions.py +++ b/tests/api_resources/agents/test_functions.py @@ -25,7 +25,7 @@ class TestFunctions: @parametrize def test_method_create(self, client: GradientAI) -> None: function = client.agents.functions.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) @@ -33,12 +33,12 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: function = client.agents.functions.create( - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - description="description", - faas_name="faas_name", - faas_namespace="faas_namespace", - function_name="function_name", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_agent_uuid='"12345678-1234-1234-1234-123456789012"', + description='"My Function Description"', + faas_name='"my-function"', + faas_namespace='"default"', + function_name='"My Function"', input_schema={}, output_schema={}, ) @@ -48,7 +48,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_create(self, client: GradientAI) -> None: response = client.agents.functions.with_raw_response.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -60,7 +60,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: with client.agents.functions.with_streaming_response.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -82,8 +82,8 @@ def test_path_params_create(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: function = client.agents.functions.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", + path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(FunctionUpdateResponse, function, path=["response"]) @@ -91,14 +91,14 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: function = client.agents.functions.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - description="description", - faas_name="faas_name", - faas_namespace="faas_namespace", - function_name="function_name", - body_function_uuid="function_uuid", + path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_agent_uuid='"12345678-1234-1234-1234-123456789012"', + description='"My Function Description"', + faas_name='"my-function"', + faas_namespace='"default"', + function_name='"My Function"', + body_function_uuid='"12345678-1234-1234-1234-123456789012"', input_schema={}, output_schema={}, ) @@ -108,8 +108,8 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.functions.with_raw_response.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", + path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -121,8 +121,8 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def 
test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.functions.with_streaming_response.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", + path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -137,22 +137,22 @@ def test_streaming_response_update(self, client: GradientAI) -> None: def test_path_params_update(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but received ''"): client.agents.functions.with_raw_response.update( - path_function_uuid="function_uuid", + path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): client.agents.functions.with_raw_response.update( path_function_uuid="", - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: function = client.agents.functions.delete( - function_uuid="function_uuid", - agent_uuid="agent_uuid", + function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(FunctionDeleteResponse, function, path=["response"]) @@ -160,8 +160,8 @@ def test_method_delete(self, client: GradientAI) -> None: @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: response = client.agents.functions.with_raw_response.delete( - function_uuid="function_uuid", - agent_uuid="agent_uuid", + function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -173,8 +173,8 @@ def 
test_raw_response_delete(self, client: GradientAI) -> None: @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: with client.agents.functions.with_streaming_response.delete( - function_uuid="function_uuid", - agent_uuid="agent_uuid", + function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -189,14 +189,14 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): client.agents.functions.with_raw_response.delete( - function_uuid="function_uuid", + function_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): client.agents.functions.with_raw_response.delete( function_uuid="", - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -209,7 +209,7 @@ class TestAsyncFunctions: @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: function = await async_client.agents.functions.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(FunctionCreateResponse, function, path=["response"]) @@ -217,12 +217,12 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: function = await async_client.agents.functions.create( - path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - description="description", - faas_name="faas_name", - faas_namespace="faas_namespace", - function_name="function_name", + 
path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_agent_uuid='"12345678-1234-1234-1234-123456789012"', + description='"My Function Description"', + faas_name='"my-function"', + faas_namespace='"default"', + function_name='"My Function"', input_schema={}, output_schema={}, ) @@ -232,7 +232,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.functions.with_raw_response.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -244,7 +244,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.functions.with_streaming_response.create( - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -266,8 +266,8 @@ async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: function = await async_client.agents.functions.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", + path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(FunctionUpdateResponse, function, path=["response"]) @@ -275,14 +275,14 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: function = await async_client.agents.functions.update( - path_function_uuid="function_uuid", - 
path_agent_uuid="agent_uuid", - body_agent_uuid="agent_uuid", - description="description", - faas_name="faas_name", - faas_namespace="faas_namespace", - function_name="function_name", - body_function_uuid="function_uuid", + path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_agent_uuid='"12345678-1234-1234-1234-123456789012"', + description='"My Function Description"', + faas_name='"my-function"', + faas_namespace='"default"', + function_name='"My Function"', + body_function_uuid='"12345678-1234-1234-1234-123456789012"', input_schema={}, output_schema={}, ) @@ -292,8 +292,8 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.functions.with_raw_response.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", + path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -305,8 +305,8 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.functions.with_streaming_response.update( - path_function_uuid="function_uuid", - path_agent_uuid="agent_uuid", + path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -321,22 +321,22 @@ async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_agent_uuid` but 
received ''"): await async_client.agents.functions.with_raw_response.update( - path_function_uuid="function_uuid", + path_function_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_function_uuid` but received ''"): await async_client.agents.functions.with_raw_response.update( path_function_uuid="", - path_agent_uuid="agent_uuid", + path_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: function = await async_client.agents.functions.delete( - function_uuid="function_uuid", - agent_uuid="agent_uuid", + function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(FunctionDeleteResponse, function, path=["response"]) @@ -344,8 +344,8 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.functions.with_raw_response.delete( - function_uuid="function_uuid", - agent_uuid="agent_uuid", + function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -357,8 +357,8 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.functions.with_streaming_response.delete( - function_uuid="function_uuid", - agent_uuid="agent_uuid", + function_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -373,12 +373,12 @@ async def 
test_streaming_response_delete(self, async_client: AsyncGradientAI) -> async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): await async_client.agents.functions.with_raw_response.delete( - function_uuid="function_uuid", + function_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `function_uuid` but received ''"): await async_client.agents.functions.with_raw_response.delete( function_uuid="", - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py index e62c05ff..2ac20d89 100644 --- a/tests/api_resources/agents/test_knowledge_bases.py +++ b/tests/api_resources/agents/test_knowledge_bases.py @@ -63,8 +63,8 @@ def test_path_params_attach(self, client: GradientAI) -> None: @parametrize def test_method_attach_single(self, client: GradientAI) -> None: knowledge_base = client.agents.knowledge_bases.attach_single( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) @@ -72,8 +72,8 @@ def test_method_attach_single(self, client: GradientAI) -> None: @parametrize def test_raw_response_attach_single(self, client: GradientAI) -> None: response = client.agents.knowledge_bases.with_raw_response.attach_single( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -85,8 +85,8 @@ def test_raw_response_attach_single(self, client: GradientAI) 
-> None: @parametrize def test_streaming_response_attach_single(self, client: GradientAI) -> None: with client.agents.knowledge_bases.with_streaming_response.attach_single( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -101,22 +101,22 @@ def test_streaming_response_attach_single(self, client: GradientAI) -> None: def test_path_params_attach_single(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): client.agents.knowledge_bases.with_raw_response.attach_single( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="", - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize def test_method_detach(self, client: GradientAI) -> None: knowledge_base = client.agents.knowledge_bases.detach( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) @@ -124,8 +124,8 @@ def test_method_detach(self, client: GradientAI) -> None: @parametrize def test_raw_response_detach(self, client: GradientAI) -> None: response = client.agents.knowledge_bases.with_raw_response.detach( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", + 
knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -137,8 +137,8 @@ def test_raw_response_detach(self, client: GradientAI) -> None: @parametrize def test_streaming_response_detach(self, client: GradientAI) -> None: with client.agents.knowledge_bases.with_streaming_response.detach( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -153,14 +153,14 @@ def test_streaming_response_detach(self, client: GradientAI) -> None: def test_path_params_detach(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): client.agents.knowledge_bases.with_raw_response.detach( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="", - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -215,8 +215,8 @@ async def test_path_params_attach(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_attach_single(self, async_client: AsyncGradientAI) -> None: knowledge_base = await async_client.agents.knowledge_bases.attach_single( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APILinkKnowledgeBaseOutput, knowledge_base, path=["response"]) @@ -224,8 +224,8 @@ async def 
test_method_attach_single(self, async_client: AsyncGradientAI) -> None @parametrize async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.knowledge_bases.with_raw_response.attach_single( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -237,8 +237,8 @@ async def test_raw_response_attach_single(self, async_client: AsyncGradientAI) - @parametrize async def test_streaming_response_attach_single(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.knowledge_bases.with_streaming_response.attach_single( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -253,22 +253,22 @@ async def test_streaming_response_attach_single(self, async_client: AsyncGradien async def test_path_params_attach_single(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): await async_client.agents.knowledge_bases.with_raw_response.attach_single( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): await async_client.agents.knowledge_bases.with_raw_response.attach_single( knowledge_base_uuid="", - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize async def test_method_detach(self, async_client: AsyncGradientAI) -> None: knowledge_base = 
await async_client.agents.knowledge_bases.detach( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(KnowledgeBaseDetachResponse, knowledge_base, path=["response"]) @@ -276,8 +276,8 @@ async def test_method_detach(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.knowledge_bases.with_raw_response.detach( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -289,8 +289,8 @@ async def test_raw_response_detach(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_detach(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.knowledge_bases.with_streaming_response.detach( - knowledge_base_uuid="knowledge_base_uuid", - agent_uuid="agent_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -305,12 +305,12 @@ async def test_streaming_response_detach(self, async_client: AsyncGradientAI) -> async def test_path_params_detach(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `agent_uuid` but received ''"): await async_client.agents.knowledge_bases.with_raw_response.detach( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received 
''"): await async_client.agents.knowledge_bases.with_raw_response.detach( knowledge_base_uuid="", - agent_uuid="agent_uuid", + agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) diff --git a/tests/api_resources/agents/test_routes.py b/tests/api_resources/agents/test_routes.py index 2e6dfd7b..d04e8c90 100644 --- a/tests/api_resources/agents/test_routes.py +++ b/tests/api_resources/agents/test_routes.py @@ -26,8 +26,8 @@ class TestRoutes: @parametrize def test_method_update(self, client: GradientAI) -> None: route = client.agents.routes.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(RouteUpdateResponse, route, path=["response"]) @@ -35,13 +35,13 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: route = client.agents.routes.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - body_child_agent_uuid="child_agent_uuid", - if_case="if_case", - body_parent_agent_uuid="parent_agent_uuid", - route_name="route_name", - uuid="uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_child_agent_uuid='"12345678-1234-1234-1234-123456789012"', + if_case='"use this to get weather information"', + body_parent_agent_uuid='"12345678-1234-1234-1234-123456789012"', + route_name='"weather_route"', + uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(RouteUpdateResponse, route, path=["response"]) @@ -49,8 +49,8 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.routes.with_raw_response.update( - 
path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -62,8 +62,8 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.routes.with_streaming_response.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -80,22 +80,22 @@ def test_path_params_update(self, client: GradientAI) -> None: ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): client.agents.routes.with_raw_response.update( - path_child_agent_uuid="child_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): client.agents.routes.with_raw_response.update( path_child_agent_uuid="", - path_parent_agent_uuid="parent_agent_uuid", + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize def test_method_delete(self, client: GradientAI) -> None: route = client.agents.routes.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="parent_agent_uuid", + child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(RouteDeleteResponse, route, path=["response"]) @@ -103,8 +103,8 @@ def test_method_delete(self, client: GradientAI) -> None: @parametrize def 
test_raw_response_delete(self, client: GradientAI) -> None: response = client.agents.routes.with_raw_response.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="parent_agent_uuid", + child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -116,8 +116,8 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: with client.agents.routes.with_streaming_response.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="parent_agent_uuid", + child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -132,22 +132,22 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: def test_path_params_delete(self, client: GradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): client.agents.routes.with_raw_response.delete( - child_agent_uuid="child_agent_uuid", + child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `child_agent_uuid` but received ''"): client.agents.routes.with_raw_response.delete( child_agent_uuid="", - parent_agent_uuid="parent_agent_uuid", + parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize def test_method_add(self, client: GradientAI) -> None: route = client.agents.routes.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) 
assert_matches_type(RouteAddResponse, route, path=["response"]) @@ -155,12 +155,12 @@ def test_method_add(self, client: GradientAI) -> None: @parametrize def test_method_add_with_all_params(self, client: GradientAI) -> None: route = client.agents.routes.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - body_child_agent_uuid="child_agent_uuid", - if_case="if_case", - body_parent_agent_uuid="parent_agent_uuid", - route_name="route_name", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_child_agent_uuid='"12345678-1234-1234-1234-123456789012"', + if_case='"use this to get weather information"', + body_parent_agent_uuid='"12345678-1234-1234-1234-123456789012"', + route_name='"weather_route"', ) assert_matches_type(RouteAddResponse, route, path=["response"]) @@ -168,8 +168,8 @@ def test_method_add_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_add(self, client: GradientAI) -> None: response = client.agents.routes.with_raw_response.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -181,8 +181,8 @@ def test_raw_response_add(self, client: GradientAI) -> None: @parametrize def test_streaming_response_add(self, client: GradientAI) -> None: with client.agents.routes.with_streaming_response.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -199,14 +199,14 @@ def test_path_params_add(self, client: 
GradientAI) -> None: ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): client.agents.routes.with_raw_response.add( - path_child_agent_uuid="child_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): client.agents.routes.with_raw_response.add( path_child_agent_uuid="", - path_parent_agent_uuid="parent_agent_uuid", + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @@ -261,8 +261,8 @@ class TestAsyncRoutes: @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: route = await async_client.agents.routes.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(RouteUpdateResponse, route, path=["response"]) @@ -270,13 +270,13 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: route = await async_client.agents.routes.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - body_child_agent_uuid="child_agent_uuid", - if_case="if_case", - body_parent_agent_uuid="parent_agent_uuid", - route_name="route_name", - uuid="uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_child_agent_uuid='"12345678-1234-1234-1234-123456789012"', + if_case='"use this to get weather information"', + body_parent_agent_uuid='"12345678-1234-1234-1234-123456789012"', + route_name='"weather_route"', + uuid='"12345678-1234-1234-1234-123456789012"', ) 
assert_matches_type(RouteUpdateResponse, route, path=["response"]) @@ -284,8 +284,8 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.routes.with_raw_response.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -297,8 +297,8 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.routes.with_streaming_response.update( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -315,22 +315,22 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): await async_client.agents.routes.with_raw_response.update( - path_child_agent_uuid="child_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): await async_client.agents.routes.with_raw_response.update( path_child_agent_uuid="", - path_parent_agent_uuid="parent_agent_uuid", + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize async def test_method_delete(self, async_client: 
AsyncGradientAI) -> None: route = await async_client.agents.routes.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="parent_agent_uuid", + child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(RouteDeleteResponse, route, path=["response"]) @@ -338,8 +338,8 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.routes.with_raw_response.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="parent_agent_uuid", + child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -351,8 +351,8 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.routes.with_streaming_response.delete( - child_agent_uuid="child_agent_uuid", - parent_agent_uuid="parent_agent_uuid", + child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -367,22 +367,22 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `parent_agent_uuid` but received ''"): await async_client.agents.routes.with_raw_response.delete( - child_agent_uuid="child_agent_uuid", + child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for 
`child_agent_uuid` but received ''"): await async_client.agents.routes.with_raw_response.delete( child_agent_uuid="", - parent_agent_uuid="parent_agent_uuid", + parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() @parametrize async def test_method_add(self, async_client: AsyncGradientAI) -> None: route = await async_client.agents.routes.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(RouteAddResponse, route, path=["response"]) @@ -390,12 +390,12 @@ async def test_method_add(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) -> None: route = await async_client.agents.routes.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", - body_child_agent_uuid="child_agent_uuid", - if_case="if_case", - body_parent_agent_uuid="parent_agent_uuid", - route_name="route_name", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_child_agent_uuid='"12345678-1234-1234-1234-123456789012"', + if_case='"use this to get weather information"', + body_parent_agent_uuid='"12345678-1234-1234-1234-123456789012"', + route_name='"weather_route"', ) assert_matches_type(RouteAddResponse, route, path=["response"]) @@ -403,8 +403,8 @@ async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) - @parametrize async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.routes.with_raw_response.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + 
path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -416,8 +416,8 @@ async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.routes.with_streaming_response.add( - path_child_agent_uuid="child_agent_uuid", - path_parent_agent_uuid="parent_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -434,14 +434,14 @@ async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: ValueError, match=r"Expected a non-empty value for `path_parent_agent_uuid` but received ''" ): await async_client.agents.routes.with_raw_response.add( - path_child_agent_uuid="child_agent_uuid", + path_child_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', path_parent_agent_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `path_child_agent_uuid` but received ''"): await async_client.agents.routes.with_raw_response.add( path_child_agent_uuid="", - path_parent_agent_uuid="parent_agent_uuid", + path_parent_agent_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @pytest.mark.skip() diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py index 79f73672..d6151470 100644 --- a/tests/api_resources/agents/test_versions.py +++ b/tests/api_resources/agents/test_versions.py @@ -24,7 +24,7 @@ class TestVersions: @parametrize def test_method_update(self, client: GradientAI) -> None: version = client.agents.versions.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -32,9 +32,9 @@ def 
test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: version = client.agents.versions.update( - path_uuid="uuid", - body_uuid="uuid", - version_hash="version_hash", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_uuid='"12345678-1234-1234-1234-123456789012"', + version_hash="c3658d8b5c05494cd03ce042926ef08157889ed54b1b74b5ee0b3d66dcee4b73", ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -42,7 +42,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.versions.with_raw_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -54,7 +54,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.versions.with_streaming_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -76,7 +76,7 @@ def test_path_params_update(self, client: GradientAI) -> None: @parametrize def test_method_list(self, client: GradientAI) -> None: version = client.agents.versions.list( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(VersionListResponse, version, path=["response"]) @@ -84,7 +84,7 @@ def test_method_list(self, client: GradientAI) -> None: @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: version = client.agents.versions.list( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -94,7 +94,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @parametrize def 
test_raw_response_list(self, client: GradientAI) -> None: response = client.agents.versions.with_raw_response.list( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -106,7 +106,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: with client.agents.versions.with_streaming_response.list( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -134,7 +134,7 @@ class TestAsyncVersions: @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: version = await async_client.agents.versions.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -142,9 +142,9 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: version = await async_client.agents.versions.update( - path_uuid="uuid", - body_uuid="uuid", - version_hash="version_hash", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_uuid='"12345678-1234-1234-1234-123456789012"', + version_hash="c3658d8b5c05494cd03ce042926ef08157889ed54b1b74b5ee0b3d66dcee4b73", ) assert_matches_type(VersionUpdateResponse, version, path=["response"]) @@ -152,7 +152,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.versions.with_raw_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -164,7 +164,7 @@ async def test_raw_response_update(self, async_client: 
AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.versions.with_streaming_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -186,7 +186,7 @@ async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: version = await async_client.agents.versions.list( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(VersionListResponse, version, path=["response"]) @@ -194,7 +194,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: version = await async_client.agents.versions.list( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -204,7 +204,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.versions.with_raw_response.list( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -216,7 +216,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.versions.with_streaming_response.list( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/chat/test_completions.py 
b/tests/api_resources/chat/test_completions.py index 25b8419a..46c8b431 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -54,6 +54,17 @@ def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> N stream=False, stream_options={"include_usage": True}, temperature=1, + tool_choice="none", + tools=[ + { + "function": { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + }, + "type": "function", + } + ], top_logprobs=0, top_p=1, user="user-1234", @@ -136,6 +147,17 @@ def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> N stop="\n", stream_options={"include_usage": True}, temperature=1, + tool_choice="none", + tools=[ + { + "function": { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + }, + "type": "function", + } + ], top_logprobs=0, top_p=1, user="user-1234", @@ -224,6 +246,17 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn stream=False, stream_options={"include_usage": True}, temperature=1, + tool_choice="none", + tools=[ + { + "function": { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + }, + "type": "function", + } + ], top_logprobs=0, top_p=1, user="user-1234", @@ -306,6 +339,17 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn stop="\n", stream_options={"include_usage": True}, temperature=1, + tool_choice="none", + tools=[ + { + "function": { + "name": "name", + "description": "description", + "parameters": {"foo": "bar"}, + }, + "type": "function", + } + ], top_logprobs=0, top_p=1, user="user-1234", diff --git a/tests/api_resources/gpu_droplets/__init__.py b/tests/api_resources/gpu_droplets/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/gpu_droplets/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. diff --git a/tests/api_resources/gpu_droplets/account/__init__.py b/tests/api_resources/gpu_droplets/account/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/gpu_droplets/account/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/gpu_droplets/account/test_keys.py b/tests/api_resources/gpu_droplets/account/test_keys.py new file mode 100644 index 00000000..acad3575 --- /dev/null +++ b/tests/api_resources/gpu_droplets/account/test_keys.py @@ -0,0 +1,399 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets.account import ( + KeyListResponse, + KeyCreateResponse, + KeyUpdateResponse, + KeyRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestKeys: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + key = client.gpu_droplets.account.keys.create( + name="My SSH Public Key", + public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.gpu_droplets.account.keys.with_raw_response.create( + name="My SSH Public Key", + public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V 
example", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.gpu_droplets.account.keys.with_streaming_response.create( + name="My SSH Public Key", + public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + key = client.gpu_droplets.account.keys.retrieve( + 512189, + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.account.keys.with_raw_response.retrieve( + 512189, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.account.keys.with_streaming_response.retrieve( + 512189, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def 
test_method_update(self, client: GradientAI) -> None: + key = client.gpu_droplets.account.keys.update( + ssh_key_identifier=512189, + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + key = client.gpu_droplets.account.keys.update( + ssh_key_identifier=512189, + name="My SSH Public Key", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.gpu_droplets.account.keys.with_raw_response.update( + ssh_key_identifier=512189, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.gpu_droplets.account.keys.with_streaming_response.update( + ssh_key_identifier=512189, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + key = client.gpu_droplets.account.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + key = client.gpu_droplets.account.keys.list( + page=1, + per_page=1, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = 
client.gpu_droplets.account.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.account.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + key = client.gpu_droplets.account.keys.delete( + 512189, + ) + assert key is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.account.keys.with_raw_response.delete( + 512189, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = response.parse() + assert key is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.account.keys.with_streaming_response.delete( + 512189, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = response.parse() + assert key is None + + assert cast(Any, response.is_closed) is True + + +class TestAsyncKeys: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + key = await 
async_client.gpu_droplets.account.keys.create( + name="My SSH Public Key", + public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", + ) + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.account.keys.with_raw_response.create( + name="My SSH Public Key", + public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.account.keys.with_streaming_response.create( + name="My SSH Public Key", + public_key="ssh-rsa AEXAMPLEaC1yc2EAAAADAQABAAAAQQDDHr/jh2Jy4yALcK4JyWbVkPRaWmhck3IgCoeOO3z1e2dBowLh64QAM+Qb72pxekALga2oi4GvT+TlWNhzPH4V example", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyCreateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + key = await async_client.gpu_droplets.account.keys.retrieve( + 512189, + ) + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await 
async_client.gpu_droplets.account.keys.with_raw_response.retrieve( + 512189, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.account.keys.with_streaming_response.retrieve( + 512189, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyRetrieveResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + key = await async_client.gpu_droplets.account.keys.update( + ssh_key_identifier=512189, + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.gpu_droplets.account.keys.update( + ssh_key_identifier=512189, + name="My SSH Public Key", + ) + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.account.keys.with_raw_response.update( + ssh_key_identifier=512189, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with 
async_client.gpu_droplets.account.keys.with_streaming_response.update( + ssh_key_identifier=512189, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyUpdateResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + key = await async_client.gpu_droplets.account.keys.list() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + key = await async_client.gpu_droplets.account.keys.list( + page=1, + per_page=1, + ) + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.account.keys.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.account.keys.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert_matches_type(KeyListResponse, key, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + key = await async_client.gpu_droplets.account.keys.delete( + 512189, + ) + assert key is None + + 
@pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.account.keys.with_raw_response.delete( + 512189, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + key = await response.parse() + assert key is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.account.keys.with_streaming_response.delete( + 512189, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + key = await response.parse() + assert key is None + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/firewalls/__init__.py b/tests/api_resources/gpu_droplets/firewalls/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/gpu_droplets/firewalls/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/gpu_droplets/firewalls/test_droplets.py b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py new file mode 100644 index 00000000..67d132aa --- /dev/null +++ b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py @@ -0,0 +1,206 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestDroplets: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_add(self, client: GradientAI) -> None: + droplet = client.gpu_droplets.firewalls.droplets.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) + assert droplet is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_add(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.droplets.with_raw_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + droplet = response.parse() + assert droplet is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_add(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.droplets.with_streaming_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + droplet = response.parse() + assert droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_add(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + client.gpu_droplets.firewalls.droplets.with_raw_response.add( + firewall_id="", + droplet_ids=[49696269], + ) + + @pytest.mark.skip() + @parametrize + def test_method_remove(self, client: GradientAI) -> None: + droplet = 
client.gpu_droplets.firewalls.droplets.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) + assert droplet is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_remove(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.droplets.with_raw_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + droplet = response.parse() + assert droplet is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_remove(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.droplets.with_streaming_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + droplet = response.parse() + assert droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_remove(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + client.gpu_droplets.firewalls.droplets.with_raw_response.remove( + firewall_id="", + droplet_ids=[49696269], + ) + + +class TestAsyncDroplets: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_add(self, async_client: AsyncGradientAI) -> None: + droplet = await async_client.gpu_droplets.firewalls.droplets.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) + assert droplet is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_add(self, async_client: AsyncGradientAI) 
-> None: + response = await async_client.gpu_droplets.firewalls.droplets.with_raw_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + droplet = await response.parse() + assert droplet is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.droplets.with_streaming_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + droplet = await response.parse() + assert droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.droplets.with_raw_response.add( + firewall_id="", + droplet_ids=[49696269], + ) + + @pytest.mark.skip() + @parametrize + async def test_method_remove(self, async_client: AsyncGradientAI) -> None: + droplet = await async_client.gpu_droplets.firewalls.droplets.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) + assert droplet is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.droplets.with_raw_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + droplet = await response.parse() + assert droplet is None + + 
@pytest.mark.skip() + @parametrize + async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.droplets.with_streaming_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + droplet_ids=[49696269], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + droplet = await response.parse() + assert droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.droplets.with_raw_response.remove( + firewall_id="", + droplet_ids=[49696269], + ) diff --git a/tests/api_resources/gpu_droplets/firewalls/test_rules.py b/tests/api_resources/gpu_droplets/firewalls/test_rules.py new file mode 100644 index 00000000..446a11af --- /dev/null +++ b/tests/api_resources/gpu_droplets/firewalls/test_rules.py @@ -0,0 +1,326 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestRules: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_add(self, client: GradientAI) -> None: + rule = client.gpu_droplets.firewalls.rules.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) + assert rule is None + + @pytest.mark.skip() + @parametrize + def test_method_add_with_all_params(self, client: GradientAI) -> None: + rule = client.gpu_droplets.firewalls.rules.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + inbound_rules=[ + { + "ports": "3306", + "protocol": "tcp", + "sources": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [49696269], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + } + ], + outbound_rules=[ + { + "destinations": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [49696269], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + "ports": "3306", + "protocol": "tcp", + } + ], + ) + assert rule is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_add(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.rules.with_raw_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + rule = response.parse() + assert rule is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_add(self, client: GradientAI) -> None: + with 
client.gpu_droplets.firewalls.rules.with_streaming_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + rule = response.parse() + assert rule is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_add(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + client.gpu_droplets.firewalls.rules.with_raw_response.add( + firewall_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_remove(self, client: GradientAI) -> None: + rule = client.gpu_droplets.firewalls.rules.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) + assert rule is None + + @pytest.mark.skip() + @parametrize + def test_method_remove_with_all_params(self, client: GradientAI) -> None: + rule = client.gpu_droplets.firewalls.rules.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + inbound_rules=[ + { + "ports": "3306", + "protocol": "tcp", + "sources": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [49696269], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + } + ], + outbound_rules=[ + { + "destinations": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [49696269], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + "ports": "3306", + "protocol": "tcp", + } + ], + ) + assert rule is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_remove(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.rules.with_raw_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", 
+ ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + rule = response.parse() + assert rule is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_remove(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.rules.with_streaming_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + rule = response.parse() + assert rule is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_remove(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + client.gpu_droplets.firewalls.rules.with_raw_response.remove( + firewall_id="", + ) + + +class TestAsyncRules: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_add(self, async_client: AsyncGradientAI) -> None: + rule = await async_client.gpu_droplets.firewalls.rules.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) + assert rule is None + + @pytest.mark.skip() + @parametrize + async def test_method_add_with_all_params(self, async_client: AsyncGradientAI) -> None: + rule = await async_client.gpu_droplets.firewalls.rules.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + inbound_rules=[ + { + "ports": "3306", + "protocol": "tcp", + "sources": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [49696269], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + } + ], + outbound_rules=[ + { + "destinations": { + "addresses": ["1.2.3.4", 
"18.0.0.0/8"], + "droplet_ids": [49696269], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + "ports": "3306", + "protocol": "tcp", + } + ], + ) + assert rule is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.rules.with_raw_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + rule = await response.parse() + assert rule is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.rules.with_streaming_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + rule = await response.parse() + assert rule is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.rules.with_raw_response.add( + firewall_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_remove(self, async_client: AsyncGradientAI) -> None: + rule = await async_client.gpu_droplets.firewalls.rules.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) + assert rule is None + + @pytest.mark.skip() + @parametrize + async def test_method_remove_with_all_params(self, async_client: AsyncGradientAI) -> None: + rule = await async_client.gpu_droplets.firewalls.rules.remove( + 
firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + inbound_rules=[ + { + "ports": "3306", + "protocol": "tcp", + "sources": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [49696269], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + } + ], + outbound_rules=[ + { + "destinations": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [49696269], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + "ports": "3306", + "protocol": "tcp", + } + ], + ) + assert rule is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.rules.with_raw_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + rule = await response.parse() + assert rule is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.rules.with_streaming_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + rule = await response.parse() + assert rule is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.rules.with_raw_response.remove( + firewall_id="", + ) diff --git 
a/tests/api_resources/gpu_droplets/firewalls/test_tags.py b/tests/api_resources/gpu_droplets/firewalls/test_tags.py new file mode 100644 index 00000000..a0227c61 --- /dev/null +++ b/tests/api_resources/gpu_droplets/firewalls/test_tags.py @@ -0,0 +1,206 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestTags: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_add(self, client: GradientAI) -> None: + tag = client.gpu_droplets.firewalls.tags.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) + assert tag is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_add(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.tags.with_raw_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + tag = response.parse() + assert tag is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_add(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.tags.with_streaming_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + tag = response.parse() + assert tag is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_add(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for 
`firewall_id` but received ''"): + client.gpu_droplets.firewalls.tags.with_raw_response.add( + firewall_id="", + tags=["frontend"], + ) + + @pytest.mark.skip() + @parametrize + def test_method_remove(self, client: GradientAI) -> None: + tag = client.gpu_droplets.firewalls.tags.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) + assert tag is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_remove(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.tags.with_raw_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + tag = response.parse() + assert tag is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_remove(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.tags.with_streaming_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + tag = response.parse() + assert tag is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_remove(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + client.gpu_droplets.firewalls.tags.with_raw_response.remove( + firewall_id="", + tags=["frontend"], + ) + + +class TestAsyncTags: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_add(self, async_client: AsyncGradientAI) -> None: + tag = await async_client.gpu_droplets.firewalls.tags.add( + 
firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) + assert tag is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.tags.with_raw_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + tag = await response.parse() + assert tag is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.tags.with_streaming_response.add( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + tag = await response.parse() + assert tag is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.tags.with_raw_response.add( + firewall_id="", + tags=["frontend"], + ) + + @pytest.mark.skip() + @parametrize + async def test_method_remove(self, async_client: AsyncGradientAI) -> None: + tag = await async_client.gpu_droplets.firewalls.tags.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) + assert tag is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.tags.with_raw_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) + + assert response.is_closed is True + 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" + tag = await response.parse() + assert tag is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.tags.with_streaming_response.remove( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + tags=["frontend"], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + tag = await response.parse() + assert tag is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.tags.with_raw_response.remove( + firewall_id="", + tags=["frontend"], + ) diff --git a/tests/api_resources/gpu_droplets/floating_ips/__init__.py b/tests/api_resources/gpu_droplets/floating_ips/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/gpu_droplets/floating_ips/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/gpu_droplets/floating_ips/test_actions.py b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py new file mode 100644 index 00000000..82a12d2e --- /dev/null +++ b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py @@ -0,0 +1,396 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets.floating_ips import ( + ActionListResponse, + ActionCreateResponse, + ActionRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestActions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_1(self, client: GradientAI) -> None: + action = client.gpu_droplets.floating_ips.actions.create( + floating_ip="45.55.96.47", + type="assign", + ) + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.floating_ips.actions.with_raw_response.create( + floating_ip="45.55.96.47", + type="assign", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.actions.with_streaming_response.create( + floating_ip="45.55.96.47", + type="assign", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create_overload_1(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for 
`floating_ip` but received ''"): + client.gpu_droplets.floating_ips.actions.with_raw_response.create( + floating_ip="", + type="assign", + ) + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.floating_ips.actions.create( + floating_ip="45.55.96.47", + droplet_id=758604968, + type="assign", + ) + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.floating_ips.actions.with_raw_response.create( + floating_ip="45.55.96.47", + droplet_id=758604968, + type="assign", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.actions.with_streaming_response.create( + floating_ip="45.55.96.47", + droplet_id=758604968, + type="assign", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create_overload_2(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + client.gpu_droplets.floating_ips.actions.with_raw_response.create( + floating_ip="", + droplet_id=758604968, + type="assign", + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + action = client.gpu_droplets.floating_ips.actions.retrieve( 
+ action_id=36804636, + floating_ip="45.55.96.47", + ) + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve( + action_id=36804636, + floating_ip="45.55.96.47", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.actions.with_streaming_response.retrieve( + action_id=36804636, + floating_ip="45.55.96.47", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve( + action_id=36804636, + floating_ip="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + action = client.gpu_droplets.floating_ips.actions.list( + "192.168.1.1", + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.floating_ips.actions.with_raw_response.list( + "192.168.1.1", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + 
action = response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.actions.with_streaming_response.list( + "192.168.1.1", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + client.gpu_droplets.floating_ips.actions.with_raw_response.list( + "", + ) + + +class TestAsyncActions: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.floating_ips.actions.create( + floating_ip="45.55.96.47", + type="assign", + ) + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create( + floating_ip="45.55.96.47", + type="assign", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async 
with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.create( + floating_ip="45.55.96.47", + type="assign", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create_overload_1(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create( + floating_ip="", + type="assign", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.floating_ips.actions.create( + floating_ip="45.55.96.47", + droplet_id=758604968, + type="assign", + ) + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create( + floating_ip="45.55.96.47", + droplet_id=758604968, + type="assign", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.create( + floating_ip="45.55.96.47", + droplet_id=758604968, + type="assign", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionCreateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_create_overload_2(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + await async_client.gpu_droplets.floating_ips.actions.with_raw_response.create( + floating_ip="", + droplet_id=758604968, + type="assign", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.floating_ips.actions.retrieve( + action_id=36804636, + floating_ip="45.55.96.47", + ) + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve( + action_id=36804636, + floating_ip="45.55.96.47", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.retrieve( + action_id=36804636, + floating_ip="45.55.96.47", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + 
@parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + await async_client.gpu_droplets.floating_ips.actions.with_raw_response.retrieve( + action_id=36804636, + floating_ip="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.floating_ips.actions.list( + "192.168.1.1", + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.actions.with_raw_response.list( + "192.168.1.1", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.floating_ips.actions.with_streaming_response.list( + "192.168.1.1", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + await async_client.gpu_droplets.floating_ips.actions.with_raw_response.list( + "", + ) diff --git a/tests/api_resources/gpu_droplets/images/__init__.py b/tests/api_resources/gpu_droplets/images/__init__.py new 
file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/gpu_droplets/images/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/gpu_droplets/images/test_actions.py b/tests/api_resources/gpu_droplets/images/test_actions.py new file mode 100644 index 00000000..4d59c85b --- /dev/null +++ b/tests/api_resources/gpu_droplets/images/test_actions.py @@ -0,0 +1,321 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.shared import Action +from gradientai.types.gpu_droplets.images import ActionListResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestActions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_1(self, client: GradientAI) -> None: + action = client.gpu_droplets.images.actions.create( + image_id=62137902, + type="convert", + ) + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.actions.with_raw_response.create( + image_id=62137902, + type="convert", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.images.actions.with_streaming_response.create( + image_id=62137902, + 
type="convert", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(Action, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.images.actions.create( + image_id=62137902, + region="nyc3", + type="convert", + ) + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.actions.with_raw_response.create( + image_id=62137902, + region="nyc3", + type="convert", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.images.actions.with_streaming_response.create( + image_id=62137902, + region="nyc3", + type="convert", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(Action, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + action = client.gpu_droplets.images.actions.retrieve( + action_id=36804636, + image_id=62137902, + ) + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.actions.with_raw_response.retrieve( + action_id=36804636, + 
image_id=62137902, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.images.actions.with_streaming_response.retrieve( + action_id=36804636, + image_id=62137902, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(Action, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + action = client.gpu_droplets.images.actions.list( + 0, + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.actions.with_raw_response.list( + 0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.images.actions.with_streaming_response.list( + 0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncActions: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + 
@pytest.mark.skip() + @parametrize + async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.images.actions.create( + image_id=62137902, + type="convert", + ) + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.actions.with_raw_response.create( + image_id=62137902, + type="convert", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.images.actions.with_streaming_response.create( + image_id=62137902, + type="convert", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(Action, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.images.actions.create( + image_id=62137902, + region="nyc3", + type="convert", + ) + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.actions.with_raw_response.create( + image_id=62137902, + region="nyc3", + type="convert", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = 
await response.parse() + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.images.actions.with_streaming_response.create( + image_id=62137902, + region="nyc3", + type="convert", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(Action, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.images.actions.retrieve( + action_id=36804636, + image_id=62137902, + ) + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.actions.with_raw_response.retrieve( + action_id=36804636, + image_id=62137902, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(Action, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.images.actions.with_streaming_response.retrieve( + action_id=36804636, + image_id=62137902, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(Action, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: 
AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.images.actions.list( + 0, + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.actions.with_raw_response.list( + 0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.images.actions.with_streaming_response.list( + 0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/load_balancers/__init__.py b/tests/api_resources/gpu_droplets/load_balancers/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/gpu_droplets/load_balancers/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py new file mode 100644 index 00000000..333567f4 --- /dev/null +++ b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py @@ -0,0 +1,206 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestDroplets: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_add(self, client: GradientAI) -> None: + droplet = client.gpu_droplets.load_balancers.droplets.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) + assert droplet is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_add(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.droplets.with_raw_response.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + droplet = response.parse() + assert droplet is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_add(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.droplets.with_streaming_response.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + droplet = response.parse() + assert droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_add(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + client.gpu_droplets.load_balancers.droplets.with_raw_response.add( + lb_id="", + droplet_ids=[3164444, 3164445], + ) + + @pytest.mark.skip() + @parametrize + def test_method_remove(self, client: GradientAI) -> None: + droplet = 
client.gpu_droplets.load_balancers.droplets.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) + assert droplet is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_remove(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.droplets.with_raw_response.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + droplet = response.parse() + assert droplet is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_remove(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.droplets.with_streaming_response.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + droplet = response.parse() + assert droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_remove(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + client.gpu_droplets.load_balancers.droplets.with_raw_response.remove( + lb_id="", + droplet_ids=[3164444, 3164445], + ) + + +class TestAsyncDroplets: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_add(self, async_client: AsyncGradientAI) -> None: + droplet = await async_client.gpu_droplets.load_balancers.droplets.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) + assert droplet is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_add(self, 
async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + droplet = await response.parse() + assert droplet is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.droplets.with_streaming_response.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + droplet = await response.parse() + assert droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.add( + lb_id="", + droplet_ids=[3164444, 3164445], + ) + + @pytest.mark.skip() + @parametrize + async def test_method_remove(self, async_client: AsyncGradientAI) -> None: + droplet = await async_client.gpu_droplets.load_balancers.droplets.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) + assert droplet is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + 
droplet = await response.parse() + assert droplet is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.droplets.with_streaming_response.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + droplet_ids=[3164444, 3164445], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + droplet = await response.parse() + assert droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.droplets.with_raw_response.remove( + lb_id="", + droplet_ids=[3164444, 3164445], + ) diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py new file mode 100644 index 00000000..ec6f7838 --- /dev/null +++ b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py @@ -0,0 +1,318 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestForwardingRules: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_add(self, client: GradientAI) -> None: + forwarding_rule = client.gpu_droplets.load_balancers.forwarding_rules.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert forwarding_rule is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_add(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + forwarding_rule = response.parse() + assert forwarding_rule is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_add(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + forwarding_rule = response.parse() + assert forwarding_rule is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def 
test_path_params_add(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add( + lb_id="", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + @pytest.mark.skip() + @parametrize + def test_method_remove(self, client: GradientAI) -> None: + forwarding_rule = client.gpu_droplets.load_balancers.forwarding_rules.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert forwarding_rule is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_remove(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + forwarding_rule = response.parse() + assert forwarding_rule is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_remove(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + forwarding_rule = response.parse() + assert forwarding_rule is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + 
@parametrize + def test_path_params_remove(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove( + lb_id="", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + +class TestAsyncForwardingRules: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_add(self, async_client: AsyncGradientAI) -> None: + forwarding_rule = await async_client.gpu_droplets.load_balancers.forwarding_rules.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert forwarding_rule is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_add(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + forwarding_rule = await response.parse() + assert forwarding_rule is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_add(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.add( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + 
"target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + forwarding_rule = await response.parse() + assert forwarding_rule is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_add(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.add( + lb_id="", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + @pytest.mark.skip() + @parametrize + async def test_method_remove(self, async_client: AsyncGradientAI) -> None: + forwarding_rule = await async_client.gpu_droplets.load_balancers.forwarding_rules.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert forwarding_rule is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_remove(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + forwarding_rule = await response.parse() + assert forwarding_rule is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_remove(self, async_client: AsyncGradientAI) -> None: + async with 
async_client.gpu_droplets.load_balancers.forwarding_rules.with_streaming_response.remove( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + forwarding_rule = await response.parse() + assert forwarding_rule is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_remove(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.forwarding_rules.with_raw_response.remove( + lb_id="", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) diff --git a/tests/api_resources/gpu_droplets/test_actions.py b/tests/api_resources/gpu_droplets/test_actions.py new file mode 100644 index 00000000..5e443dd8 --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_actions.py @@ -0,0 +1,1209 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import ( + ActionListResponse, + ActionInitiateResponse, + ActionRetrieveResponse, + ActionBulkInitiateResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestActions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.retrieve( + action_id=36804636, + droplet_id=3164444, + ) + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.retrieve( + action_id=36804636, + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.retrieve( + action_id=36804636, + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.list( + droplet_id=3164444, + ) + assert_matches_type(ActionListResponse, action, 
path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.list( + droplet_id=3164444, + page=1, + per_page=1, + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.list( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.list( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_bulk_initiate_overload_1(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.bulk_initiate( + type="reboot", + ) + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_bulk_initiate_with_all_params_overload_1(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.bulk_initiate( + type="reboot", + tag_name="tag_name", + ) + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_bulk_initiate_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.bulk_initiate( + type="reboot", + ) + + assert response.is_closed is 
True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_bulk_initiate_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.bulk_initiate( + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_bulk_initiate_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.bulk_initiate( + type="reboot", + ) + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_bulk_initiate_with_all_params_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.bulk_initiate( + type="reboot", + tag_name="tag_name", + name="Nifty New Snapshot", + ) + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_bulk_initiate_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.bulk_initiate( + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_bulk_initiate_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.bulk_initiate( + type="reboot", + ) as response: + assert not response.is_closed + 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_overload_1(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="enable_backups", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_with_all_params_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="enable_backups", + backup_policy={ + "hour": 20, 
+ "plan": "daily", + "weekday": "SUN", + }, + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="enable_backups", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="enable_backups", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_overload_3(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="enable_backups", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_with_all_params_overload_3(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="enable_backups", + backup_policy={ + "hour": 20, + "plan": "weekly", + "weekday": "SUN", + }, + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_overload_3(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="enable_backups", + ) + + 
assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_overload_3(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="enable_backups", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_overload_4(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_with_all_params_overload_4(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + image=12389723, + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_overload_4(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_overload_4(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as 
response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_overload_5(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_with_all_params_overload_5(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + disk=True, + size="s-2vcpu-2gb", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_overload_5(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_overload_5(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_overload_6(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + 
droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_with_all_params_overload_6(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + image="ubuntu-20-04-x64", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_overload_6(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_overload_6(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_overload_7(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_with_all_params_overload_7(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + name="nifty-new-name", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + def test_raw_response_initiate_overload_7(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_overload_7(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_overload_8(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_with_all_params_overload_8(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + kernel=12389723, + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_overload_8(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_overload_8(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_overload_9(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_with_all_params_overload_9(self, client: GradientAI) -> None: + action = client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + name="Nifty New Snapshot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_overload_9(self, client: GradientAI) -> None: + response = client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_overload_9(self, client: GradientAI) -> None: + with client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateResponse, action, 
path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncActions: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.retrieve( + action_id=36804636, + droplet_id=3164444, + ) + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.actions.with_raw_response.retrieve( + action_id=36804636, + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.actions.with_streaming_response.retrieve( + action_id=36804636, + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.list( + droplet_id=3164444, + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + action = await 
async_client.gpu_droplets.actions.list( + droplet_id=3164444, + page=1, + per_page=1, + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.actions.with_raw_response.list( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.actions.with_streaming_response.list( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_bulk_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.bulk_initiate( + type="reboot", + ) + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_bulk_initiate_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.bulk_initiate( + type="reboot", + tag_name="tag_name", + ) + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_bulk_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.actions.with_raw_response.bulk_initiate( + type="reboot", + ) + + assert 
response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_bulk_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.actions.with_streaming_response.bulk_initiate( + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_bulk_initiate_overload_2(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.bulk_initiate( + type="reboot", + ) + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_bulk_initiate_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.bulk_initiate( + type="reboot", + tag_name="tag_name", + name="Nifty New Snapshot", + ) + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_bulk_initiate_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.actions.with_raw_response.bulk_initiate( + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_bulk_initiate_overload_2(self, 
async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.actions.with_streaming_response.bulk_initiate( + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionBulkInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_overload_2(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="enable_backups", + ) + 
assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="enable_backups", + backup_policy={ + "hour": 20, + "plan": "daily", + "weekday": "SUN", + }, + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="enable_backups", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="enable_backups", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_overload_3(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="enable_backups", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_with_all_params_overload_3(self, async_client: AsyncGradientAI) -> None: + action = await 
async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="enable_backups", + backup_policy={ + "hour": 20, + "plan": "weekly", + "weekday": "SUN", + }, + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_overload_3(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="enable_backups", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_overload_3(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="enable_backups", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_overload_4(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_with_all_params_overload_4(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + image=12389723, + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_raw_response_initiate_overload_4(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_overload_4(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_overload_5(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_with_all_params_overload_5(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + disk=True, + size="s-2vcpu-2gb", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_overload_5(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_overload_5(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_overload_6(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_with_all_params_overload_6(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + image="ubuntu-20-04-x64", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_overload_6(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_overload_6(self, async_client: AsyncGradientAI) -> None: + async with 
async_client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_overload_7(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_with_all_params_overload_7(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + name="nifty-new-name", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_overload_7(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_overload_7(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, 
path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_overload_8(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_with_all_params_overload_8(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + kernel=12389723, + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_overload_8(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_overload_8(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_overload_9(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + ) + 
assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_with_all_params_overload_9(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.actions.initiate( + droplet_id=3164444, + type="reboot", + name="Nifty New Snapshot", + ) + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_overload_9(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.actions.with_raw_response.initiate( + droplet_id=3164444, + type="reboot", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_overload_9(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.actions.with_streaming_response.initiate( + droplet_id=3164444, + type="reboot", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/test_autoscale.py b/tests/api_resources/gpu_droplets/test_autoscale.py new file mode 100644 index 00000000..42164666 --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_autoscale.py @@ -0,0 +1,953 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import ( + AutoscaleListResponse, + AutoscaleCreateResponse, + AutoscaleUpdateResponse, + AutoscaleRetrieveResponse, + AutoscaleListHistoryResponse, + AutoscaleListMembersResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestAutoscale: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + autoscale = client.gpu_droplets.autoscale.create( + config={ + "max_instances": 5, + "min_instances": 1, + }, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) + assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + autoscale = client.gpu_droplets.autoscale.create( + config={ + "max_instances": 5, + "min_instances": 1, + "cooldown_minutes": 10, + "target_cpu_utilization": 0.5, + "target_memory_utilization": 0.6, + }, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + "ipv6": True, + "name": "example.com", + "project_id": "746c6152-2fa2-11ed-92d3-27aaa54e4988", + "tags": ["env:prod", "web"], + "user_data": "#cloud-config\nruncmd:\n - touch /test.txt\n", + "vpc_uuid": "760e09ef-dc84-11e8-981e-3cfdfeaae000", + "with_droplet_agent": True, + }, + name="my-autoscale-pool", + ) + assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + 
@parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.gpu_droplets.autoscale.with_raw_response.create( + config={ + "max_instances": 5, + "min_instances": 1, + }, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = response.parse() + assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.gpu_droplets.autoscale.with_streaming_response.create( + config={ + "max_instances": 5, + "min_instances": 1, + }, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = response.parse() + assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + autoscale = client.gpu_droplets.autoscale.retrieve( + "autoscale_pool_id", + ) + assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.autoscale.with_raw_response.retrieve( + "autoscale_pool_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = response.parse() + assert_matches_type(AutoscaleRetrieveResponse, 
autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.autoscale.with_streaming_response.retrieve( + "autoscale_pool_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = response.parse() + assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): + client.gpu_droplets.autoscale.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + autoscale = client.gpu_droplets.autoscale.update( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + config={"target_number_instances": 2}, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) + assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + autoscale = client.gpu_droplets.autoscale.update( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + config={"target_number_instances": 2}, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + "ipv6": True, + "name": "example.com", + "project_id": "746c6152-2fa2-11ed-92d3-27aaa54e4988", + "tags": ["env:prod", "web"], + "user_data": "#cloud-config\nruncmd:\n - touch /test.txt\n", + "vpc_uuid": "760e09ef-dc84-11e8-981e-3cfdfeaae000", 
+ "with_droplet_agent": True, + }, + name="my-autoscale-pool", + ) + assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.gpu_droplets.autoscale.with_raw_response.update( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + config={"target_number_instances": 2}, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = response.parse() + assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.gpu_droplets.autoscale.with_streaming_response.update( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + config={"target_number_instances": 2}, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = response.parse() + assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): + client.gpu_droplets.autoscale.with_raw_response.update( + autoscale_pool_id="", + config={"target_number_instances": 2}, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + 
"size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + autoscale = client.gpu_droplets.autoscale.list() + assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + autoscale = client.gpu_droplets.autoscale.list( + name="name", + page=1, + per_page=1, + ) + assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.autoscale.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = response.parse() + assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.autoscale.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = response.parse() + assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + autoscale = client.gpu_droplets.autoscale.delete( + "autoscale_pool_id", + ) + assert autoscale is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.autoscale.with_raw_response.delete( + "autoscale_pool_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = 
response.parse() + assert autoscale is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.autoscale.with_streaming_response.delete( + "autoscale_pool_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = response.parse() + assert autoscale is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): + client.gpu_droplets.autoscale.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete_dangerous(self, client: GradientAI) -> None: + autoscale = client.gpu_droplets.autoscale.delete_dangerous( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + x_dangerous=True, + ) + assert autoscale is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete_dangerous(self, client: GradientAI) -> None: + response = client.gpu_droplets.autoscale.with_raw_response.delete_dangerous( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + x_dangerous=True, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = response.parse() + assert autoscale is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete_dangerous(self, client: GradientAI) -> None: + with client.gpu_droplets.autoscale.with_streaming_response.delete_dangerous( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + x_dangerous=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = response.parse() + assert autoscale is None + + assert cast(Any, response.is_closed) 
is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete_dangerous(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): + client.gpu_droplets.autoscale.with_raw_response.delete_dangerous( + autoscale_pool_id="", + x_dangerous=True, + ) + + @pytest.mark.skip() + @parametrize + def test_method_list_history(self, client: GradientAI) -> None: + autoscale = client.gpu_droplets.autoscale.list_history( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + ) + assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_history_with_all_params(self, client: GradientAI) -> None: + autoscale = client.gpu_droplets.autoscale.list_history( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + page=1, + per_page=1, + ) + assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_history(self, client: GradientAI) -> None: + response = client.gpu_droplets.autoscale.with_raw_response.list_history( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = response.parse() + assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_history(self, client: GradientAI) -> None: + with client.gpu_droplets.autoscale.with_streaming_response.list_history( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = response.parse() + assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) + + assert cast(Any, 
response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_history(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): + client.gpu_droplets.autoscale.with_raw_response.list_history( + autoscale_pool_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list_members(self, client: GradientAI) -> None: + autoscale = client.gpu_droplets.autoscale.list_members( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + ) + assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_members_with_all_params(self, client: GradientAI) -> None: + autoscale = client.gpu_droplets.autoscale.list_members( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + page=1, + per_page=1, + ) + assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_members(self, client: GradientAI) -> None: + response = client.gpu_droplets.autoscale.with_raw_response.list_members( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = response.parse() + assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_members(self, client: GradientAI) -> None: + with client.gpu_droplets.autoscale.with_streaming_response.list_members( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = response.parse() + assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) + + assert cast(Any, 
response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list_members(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): + client.gpu_droplets.autoscale.with_raw_response.list_members( + autoscale_pool_id="", + ) + + +class TestAsyncAutoscale: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.create( + config={ + "max_instances": 5, + "min_instances": 1, + }, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) + assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.create( + config={ + "max_instances": 5, + "min_instances": 1, + "cooldown_minutes": 10, + "target_cpu_utilization": 0.5, + "target_memory_utilization": 0.6, + }, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + "ipv6": True, + "name": "example.com", + "project_id": "746c6152-2fa2-11ed-92d3-27aaa54e4988", + "tags": ["env:prod", "web"], + "user_data": "#cloud-config\nruncmd:\n - touch /test.txt\n", + "vpc_uuid": "760e09ef-dc84-11e8-981e-3cfdfeaae000", + "with_droplet_agent": True, + }, + name="my-autoscale-pool", + ) + assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.autoscale.with_raw_response.create( + config={ + "max_instances": 5, + "min_instances": 1, + }, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = await response.parse() + assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.autoscale.with_streaming_response.create( + config={ + "max_instances": 5, + "min_instances": 1, + }, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = await response.parse() + assert_matches_type(AutoscaleCreateResponse, autoscale, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.retrieve( + "autoscale_pool_id", + ) + assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.autoscale.with_raw_response.retrieve( + "autoscale_pool_id", + ) + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = await response.parse() + assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.autoscale.with_streaming_response.retrieve( + "autoscale_pool_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = await response.parse() + assert_matches_type(AutoscaleRetrieveResponse, autoscale, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): + await async_client.gpu_droplets.autoscale.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.update( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + config={"target_number_instances": 2}, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) + assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.update( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + config={"target_number_instances": 2}, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": 
["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + "ipv6": True, + "name": "example.com", + "project_id": "746c6152-2fa2-11ed-92d3-27aaa54e4988", + "tags": ["env:prod", "web"], + "user_data": "#cloud-config\nruncmd:\n - touch /test.txt\n", + "vpc_uuid": "760e09ef-dc84-11e8-981e-3cfdfeaae000", + "with_droplet_agent": True, + }, + name="my-autoscale-pool", + ) + assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.autoscale.with_raw_response.update( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + config={"target_number_instances": 2}, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = await response.parse() + assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.autoscale.with_streaming_response.update( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + config={"target_number_instances": 2}, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = await response.parse() + assert_matches_type(AutoscaleUpdateResponse, autoscale, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + 
async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): + await async_client.gpu_droplets.autoscale.with_raw_response.update( + autoscale_pool_id="", + config={"target_number_instances": 2}, + droplet_template={ + "image": "ubuntu-20-04-x64", + "region": "nyc3", + "size": "c-2", + "ssh_keys": ["3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + }, + name="my-autoscale-pool", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.list() + assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.list( + name="name", + page=1, + per_page=1, + ) + assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.autoscale.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = await response.parse() + assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.autoscale.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = await response.parse() + assert_matches_type(AutoscaleListResponse, autoscale, path=["response"]) + + assert cast(Any, response.is_closed) is True + + 
@pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.delete( + "autoscale_pool_id", + ) + assert autoscale is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.autoscale.with_raw_response.delete( + "autoscale_pool_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = await response.parse() + assert autoscale is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.autoscale.with_streaming_response.delete( + "autoscale_pool_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = await response.parse() + assert autoscale is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): + await async_client.gpu_droplets.autoscale.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.delete_dangerous( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + x_dangerous=True, + ) + assert autoscale is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.autoscale.with_raw_response.delete_dangerous( + 
autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + x_dangerous=True, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = await response.parse() + assert autoscale is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.autoscale.with_streaming_response.delete_dangerous( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + x_dangerous=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = await response.parse() + assert autoscale is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): + await async_client.gpu_droplets.autoscale.with_raw_response.delete_dangerous( + autoscale_pool_id="", + x_dangerous=True, + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_history(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.list_history( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + ) + assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_history_with_all_params(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.list_history( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + page=1, + per_page=1, + ) + assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_history(self, async_client: 
AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.autoscale.with_raw_response.list_history( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = await response.parse() + assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_history(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.autoscale.with_streaming_response.list_history( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = await response.parse() + assert_matches_type(AutoscaleListHistoryResponse, autoscale, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_history(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): + await async_client.gpu_droplets.autoscale.with_raw_response.list_history( + autoscale_pool_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list_members(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.list_members( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + ) + assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_members_with_all_params(self, async_client: AsyncGradientAI) -> None: + autoscale = await async_client.gpu_droplets.autoscale.list_members( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + page=1, + per_page=1, + ) + 
assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_members(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.autoscale.with_raw_response.list_members( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + autoscale = await response.parse() + assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_members(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.autoscale.with_streaming_response.list_members( + autoscale_pool_id="0d3db13e-a604-4944-9827-7ec2642d32ac", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + autoscale = await response.parse() + assert_matches_type(AutoscaleListMembersResponse, autoscale, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list_members(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `autoscale_pool_id` but received ''"): + await async_client.gpu_droplets.autoscale.with_raw_response.list_members( + autoscale_pool_id="", + ) diff --git a/tests/api_resources/gpu_droplets/test_backups.py b/tests/api_resources/gpu_droplets/test_backups.py new file mode 100644 index 00000000..f8f72140 --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_backups.py @@ -0,0 +1,315 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import ( + BackupListResponse, + BackupListPoliciesResponse, + BackupRetrievePolicyResponse, + BackupListSupportedPoliciesResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestBackups: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + backup = client.gpu_droplets.backups.list( + droplet_id=3164444, + ) + assert_matches_type(BackupListResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + backup = client.gpu_droplets.backups.list( + droplet_id=3164444, + page=1, + per_page=1, + ) + assert_matches_type(BackupListResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.backups.with_raw_response.list( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + backup = response.parse() + assert_matches_type(BackupListResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.backups.with_streaming_response.list( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + backup = response.parse() + assert_matches_type(BackupListResponse, backup, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + 
def test_method_list_policies(self, client: GradientAI) -> None: + backup = client.gpu_droplets.backups.list_policies() + assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_policies_with_all_params(self, client: GradientAI) -> None: + backup = client.gpu_droplets.backups.list_policies( + page=1, + per_page=1, + ) + assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_policies(self, client: GradientAI) -> None: + response = client.gpu_droplets.backups.with_raw_response.list_policies() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + backup = response.parse() + assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_policies(self, client: GradientAI) -> None: + with client.gpu_droplets.backups.with_streaming_response.list_policies() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + backup = response.parse() + assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list_supported_policies(self, client: GradientAI) -> None: + backup = client.gpu_droplets.backups.list_supported_policies() + assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_supported_policies(self, client: GradientAI) -> None: + response = client.gpu_droplets.backups.with_raw_response.list_supported_policies() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + backup = response.parse() + 
assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_supported_policies(self, client: GradientAI) -> None: + with client.gpu_droplets.backups.with_streaming_response.list_supported_policies() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + backup = response.parse() + assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_policy(self, client: GradientAI) -> None: + backup = client.gpu_droplets.backups.retrieve_policy( + 1, + ) + assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve_policy(self, client: GradientAI) -> None: + response = client.gpu_droplets.backups.with_raw_response.retrieve_policy( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + backup = response.parse() + assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve_policy(self, client: GradientAI) -> None: + with client.gpu_droplets.backups.with_streaming_response.retrieve_policy( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + backup = response.parse() + assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncBackups: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, 
async_client: AsyncGradientAI) -> None: + backup = await async_client.gpu_droplets.backups.list( + droplet_id=3164444, + ) + assert_matches_type(BackupListResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + backup = await async_client.gpu_droplets.backups.list( + droplet_id=3164444, + page=1, + per_page=1, + ) + assert_matches_type(BackupListResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.backups.with_raw_response.list( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + backup = await response.parse() + assert_matches_type(BackupListResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.backups.with_streaming_response.list( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + backup = await response.parse() + assert_matches_type(BackupListResponse, backup, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list_policies(self, async_client: AsyncGradientAI) -> None: + backup = await async_client.gpu_droplets.backups.list_policies() + assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_policies_with_all_params(self, async_client: AsyncGradientAI) -> None: + backup = await async_client.gpu_droplets.backups.list_policies( + page=1, + per_page=1, + ) + assert_matches_type(BackupListPoliciesResponse, backup, 
path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_policies(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.backups.with_raw_response.list_policies() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + backup = await response.parse() + assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_policies(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.backups.with_streaming_response.list_policies() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + backup = await response.parse() + assert_matches_type(BackupListPoliciesResponse, backup, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list_supported_policies(self, async_client: AsyncGradientAI) -> None: + backup = await async_client.gpu_droplets.backups.list_supported_policies() + assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_supported_policies(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.backups.with_raw_response.list_supported_policies() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + backup = await response.parse() + assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_supported_policies(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.backups.with_streaming_response.list_supported_policies() as response: + assert not 
response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + backup = await response.parse() + assert_matches_type(BackupListSupportedPoliciesResponse, backup, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_policy(self, async_client: AsyncGradientAI) -> None: + backup = await async_client.gpu_droplets.backups.retrieve_policy( + 1, + ) + assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve_policy(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.backups.with_raw_response.retrieve_policy( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + backup = await response.parse() + assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve_policy(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.backups.with_streaming_response.retrieve_policy( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + backup = await response.parse() + assert_matches_type(BackupRetrievePolicyResponse, backup, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py new file mode 100644 index 00000000..b6922feb --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py @@ -0,0 +1,431 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import ( + DestroyWithAssociatedResourceListResponse, + DestroyWithAssociatedResourceCheckStatusResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestDestroyWithAssociatedResources: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.list( + 1, + ) + assert_matches_type( + DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.list( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = response.parse() + assert_matches_type( + DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.list( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = response.parse() + assert_matches_type( + DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] + ) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def 
test_method_check_status(self, client: GradientAI) -> None: + destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.check_status( + 1, + ) + assert_matches_type( + DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + def test_raw_response_check_status(self, client: GradientAI) -> None: + response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.check_status( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = response.parse() + assert_matches_type( + DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_check_status(self, client: GradientAI) -> None: + with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.check_status( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = response.parse() + assert_matches_type( + DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] + ) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete_dangerous(self, client: GradientAI) -> None: + destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.delete_dangerous( + droplet_id=3164444, + x_dangerous=True, + ) + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete_dangerous(self, client: GradientAI) -> None: + response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_dangerous( + droplet_id=3164444, + x_dangerous=True, + ) + + assert 
response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = response.parse() + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete_dangerous(self, client: GradientAI) -> None: + with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_dangerous( + droplet_id=3164444, + x_dangerous=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = response.parse() + assert destroy_with_associated_resource is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete_selective(self, client: GradientAI) -> None: + destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.delete_selective( + droplet_id=3164444, + ) + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + def test_method_delete_selective_with_all_params(self, client: GradientAI) -> None: + destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.delete_selective( + droplet_id=3164444, + floating_ips=["6186916"], + reserved_ips=["6186916"], + snapshots=["61486916"], + volume_snapshots=["edb0478d-7436-11ea-86e6-0a58ac144b91"], + volumes=["ba49449a-7435-11ea-b89e-0a58ac14480f"], + ) + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete_selective(self, client: GradientAI) -> None: + response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_selective( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = response.parse() + assert 
destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete_selective(self, client: GradientAI) -> None: + with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_selective( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = response.parse() + assert destroy_with_associated_resource is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retry(self, client: GradientAI) -> None: + destroy_with_associated_resource = client.gpu_droplets.destroy_with_associated_resources.retry( + 1, + ) + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_retry(self, client: GradientAI) -> None: + response = client.gpu_droplets.destroy_with_associated_resources.with_raw_response.retry( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = response.parse() + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retry(self, client: GradientAI) -> None: + with client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.retry( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = response.parse() + assert destroy_with_associated_resource is None + + assert cast(Any, response.is_closed) is True + + +class TestAsyncDestroyWithAssociatedResources: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def 
test_method_list(self, async_client: AsyncGradientAI) -> None: + destroy_with_associated_resource = await async_client.gpu_droplets.destroy_with_associated_resources.list( + 1, + ) + assert_matches_type( + DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.list( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = await response.parse() + assert_matches_type( + DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.list( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = await response.parse() + assert_matches_type( + DestroyWithAssociatedResourceListResponse, destroy_with_associated_resource, path=["response"] + ) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_check_status(self, async_client: AsyncGradientAI) -> None: + destroy_with_associated_resource = ( + await async_client.gpu_droplets.destroy_with_associated_resources.check_status( + 1, + ) + ) + assert_matches_type( + DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_check_status(self, async_client: AsyncGradientAI) -> None: + response = await 
async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.check_status( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = await response.parse() + assert_matches_type( + DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] + ) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_check_status(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.check_status( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = await response.parse() + assert_matches_type( + DestroyWithAssociatedResourceCheckStatusResponse, destroy_with_associated_resource, path=["response"] + ) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + destroy_with_associated_resource = ( + await async_client.gpu_droplets.destroy_with_associated_resources.delete_dangerous( + droplet_id=3164444, + x_dangerous=True, + ) + ) + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete_dangerous(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_dangerous( + droplet_id=3164444, + x_dangerous=True, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = await response.parse() + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete_dangerous(self, async_client: 
AsyncGradientAI) -> None: + async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_dangerous( + droplet_id=3164444, + x_dangerous=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = await response.parse() + assert destroy_with_associated_resource is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete_selective(self, async_client: AsyncGradientAI) -> None: + destroy_with_associated_resource = ( + await async_client.gpu_droplets.destroy_with_associated_resources.delete_selective( + droplet_id=3164444, + ) + ) + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + async def test_method_delete_selective_with_all_params(self, async_client: AsyncGradientAI) -> None: + destroy_with_associated_resource = ( + await async_client.gpu_droplets.destroy_with_associated_resources.delete_selective( + droplet_id=3164444, + floating_ips=["6186916"], + reserved_ips=["6186916"], + snapshots=["61486916"], + volume_snapshots=["edb0478d-7436-11ea-86e6-0a58ac144b91"], + volumes=["ba49449a-7435-11ea-b89e-0a58ac14480f"], + ) + ) + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete_selective(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.delete_selective( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = await response.parse() + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete_selective(self, async_client: AsyncGradientAI) -> None: + async with 
async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.delete_selective( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = await response.parse() + assert destroy_with_associated_resource is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retry(self, async_client: AsyncGradientAI) -> None: + destroy_with_associated_resource = await async_client.gpu_droplets.destroy_with_associated_resources.retry( + 1, + ) + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retry(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.destroy_with_associated_resources.with_raw_response.retry( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + destroy_with_associated_resource = await response.parse() + assert destroy_with_associated_resource is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retry(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.destroy_with_associated_resources.with_streaming_response.retry( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + destroy_with_associated_resource = await response.parse() + assert destroy_with_associated_resource is None + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/test_firewalls.py b/tests/api_resources/gpu_droplets/test_firewalls.py new file mode 100644 index 00000000..537fe7d2 --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_firewalls.py @@ -0,0 +1,617 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import ( + FirewallListResponse, + FirewallCreateResponse, + FirewallUpdateResponse, + FirewallRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestFirewalls: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + firewall = client.gpu_droplets.firewalls.create() + assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + firewall = client.gpu_droplets.firewalls.create( + body={ + "droplet_ids": [8043964], + "inbound_rules": [ + { + "ports": "80", + "protocol": "tcp", + "sources": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + }, + { + "ports": "22", + "protocol": "tcp", + "sources": { + "addresses": ["18.0.0.0/8"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["gateway"], + }, + }, + ], + "name": "firewall", + "outbound_rules": [ + { + "destinations": { + "addresses": ["0.0.0.0/0", "::/0"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + "ports": "80", + "protocol": "tcp", + } + ], + "tags": ["base-image", 
"prod"], + }, + ) + assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = response.parse() + assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = response.parse() + assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + firewall = client.gpu_droplets.firewalls.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.with_raw_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = response.parse() + assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.with_streaming_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = response.parse() + assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + client.gpu_droplets.firewalls.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + firewall = client.gpu_droplets.firewalls.update( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + firewall={"name": "frontend-firewall"}, + ) + assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + firewall = client.gpu_droplets.firewalls.update( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + firewall={ + "droplet_ids": [8043964], + "inbound_rules": [ + { + "ports": "8080", + "protocol": "tcp", + "sources": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + }, + { + "ports": "22", + "protocol": "tcp", + "sources": { + "addresses": ["18.0.0.0/8"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["gateway"], + }, + }, + ], + "name": "frontend-firewall", + "outbound_rules": [ + { + "destinations": { + "addresses": ["0.0.0.0/0", "::/0"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], 
+ }, + "ports": "8080", + "protocol": "tcp", + } + ], + "tags": ["frontend"], + }, + ) + assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.with_raw_response.update( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + firewall={"name": "frontend-firewall"}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = response.parse() + assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.with_streaming_response.update( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + firewall={"name": "frontend-firewall"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = response.parse() + assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + client.gpu_droplets.firewalls.with_raw_response.update( + firewall_id="", + firewall={"name": "frontend-firewall"}, + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + firewall = client.gpu_droplets.firewalls.list() + assert_matches_type(FirewallListResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + firewall = client.gpu_droplets.firewalls.list( + page=1, + per_page=1, + ) + assert_matches_type(FirewallListResponse, 
firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = response.parse() + assert_matches_type(FirewallListResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = response.parse() + assert_matches_type(FirewallListResponse, firewall, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + firewall = client.gpu_droplets.firewalls.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert firewall is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.firewalls.with_raw_response.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = response.parse() + assert firewall is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.firewalls.with_streaming_response.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = response.parse() + assert firewall is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def 
test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + client.gpu_droplets.firewalls.with_raw_response.delete( + "", + ) + + +class TestAsyncFirewalls: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + firewall = await async_client.gpu_droplets.firewalls.create() + assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + firewall = await async_client.gpu_droplets.firewalls.create( + body={ + "droplet_ids": [8043964], + "inbound_rules": [ + { + "ports": "80", + "protocol": "tcp", + "sources": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + }, + { + "ports": "22", + "protocol": "tcp", + "sources": { + "addresses": ["18.0.0.0/8"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["gateway"], + }, + }, + ], + "name": "firewall", + "outbound_rules": [ + { + "destinations": { + "addresses": ["0.0.0.0/0", "::/0"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + "ports": "80", + "protocol": "tcp", + } + ], + "tags": ["base-image", "prod"], + }, + ) + assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + 
@parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = await response.parse() + assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = await response.parse() + assert_matches_type(FirewallCreateResponse, firewall, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + firewall = await async_client.gpu_droplets.firewalls.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.with_raw_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = await response.parse() + assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.with_streaming_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = await response.parse() + assert_matches_type(FirewallRetrieveResponse, firewall, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + firewall = await async_client.gpu_droplets.firewalls.update( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + firewall={"name": "frontend-firewall"}, + ) + assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + firewall = await async_client.gpu_droplets.firewalls.update( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + firewall={ + "droplet_ids": [8043964], + "inbound_rules": [ + { + "ports": "8080", + "protocol": "tcp", + "sources": { + "addresses": ["1.2.3.4", "18.0.0.0/8"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + }, + { + "ports": "22", + "protocol": "tcp", + "sources": { + "addresses": ["18.0.0.0/8"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + "load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["gateway"], + }, + }, + ], + "name": "frontend-firewall", + "outbound_rules": [ + { + "destinations": { + "addresses": ["0.0.0.0/0", "::/0"], + "droplet_ids": [8043964], + "kubernetes_ids": ["41b74c5d-9bd0-5555-5555-a57c495b81a3"], + 
"load_balancer_uids": ["4de7ac8b-495b-4884-9a69-1050c6793cd6"], + "tags": ["base-image", "prod"], + }, + "ports": "8080", + "protocol": "tcp", + } + ], + "tags": ["frontend"], + }, + ) + assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.with_raw_response.update( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + firewall={"name": "frontend-firewall"}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = await response.parse() + assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.with_streaming_response.update( + firewall_id="bb4b2611-3d72-467b-8602-280330ecd65c", + firewall={"name": "frontend-firewall"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = await response.parse() + assert_matches_type(FirewallUpdateResponse, firewall, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.with_raw_response.update( + firewall_id="", + firewall={"name": "frontend-firewall"}, + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + firewall = await async_client.gpu_droplets.firewalls.list() + assert_matches_type(FirewallListResponse, firewall, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + firewall = await async_client.gpu_droplets.firewalls.list( + page=1, + per_page=1, + ) + assert_matches_type(FirewallListResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = await response.parse() + assert_matches_type(FirewallListResponse, firewall, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.firewalls.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = await response.parse() + assert_matches_type(FirewallListResponse, firewall, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + firewall = await async_client.gpu_droplets.firewalls.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert firewall is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.firewalls.with_raw_response.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + firewall = await response.parse() + assert firewall is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: 
+ async with async_client.gpu_droplets.firewalls.with_streaming_response.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + firewall = await response.parse() + assert firewall is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `firewall_id` but received ''"): + await async_client.gpu_droplets.firewalls.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/gpu_droplets/test_floating_ips.py b/tests/api_resources/gpu_droplets/test_floating_ips.py new file mode 100644 index 00000000..830e9b39 --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_floating_ips.py @@ -0,0 +1,424 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import ( + FloatingIPListResponse, + FloatingIPCreateResponse, + FloatingIPRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestFloatingIPs: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_1(self, client: GradientAI) -> None: + floating_ip = client.gpu_droplets.floating_ips.create( + droplet_id=2457247, + ) + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.floating_ips.with_raw_response.create( + droplet_id=2457247, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = response.parse() + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.with_streaming_response.create( + droplet_id=2457247, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = response.parse() + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_2(self, client: GradientAI) -> None: + floating_ip = client.gpu_droplets.floating_ips.create( + region="nyc3", + ) + assert_matches_type(FloatingIPCreateResponse, 
floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: + floating_ip = client.gpu_droplets.floating_ips.create( + region="nyc3", + project_id="746c6152-2fa2-11ed-92d3-27aaa54e4988", + ) + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.floating_ips.with_raw_response.create( + region="nyc3", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = response.parse() + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.with_streaming_response.create( + region="nyc3", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = response.parse() + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + floating_ip = client.gpu_droplets.floating_ips.retrieve( + "192.168.1.1", + ) + assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.floating_ips.with_raw_response.retrieve( + "192.168.1.1", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = response.parse() + assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.with_streaming_response.retrieve( + "192.168.1.1", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = response.parse() + assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + client.gpu_droplets.floating_ips.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + floating_ip = client.gpu_droplets.floating_ips.list() + assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + floating_ip = client.gpu_droplets.floating_ips.list( + page=1, + per_page=1, + ) + assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.floating_ips.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = response.parse() + assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = 
response.parse() + assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + floating_ip = client.gpu_droplets.floating_ips.delete( + "192.168.1.1", + ) + assert floating_ip is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.floating_ips.with_raw_response.delete( + "192.168.1.1", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = response.parse() + assert floating_ip is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.floating_ips.with_streaming_response.delete( + "192.168.1.1", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = response.parse() + assert floating_ip is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + client.gpu_droplets.floating_ips.with_raw_response.delete( + "", + ) + + +class TestAsyncFloatingIPs: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: + floating_ip = await async_client.gpu_droplets.floating_ips.create( + droplet_id=2457247, + ) + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.with_raw_response.create( + droplet_id=2457247, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = await response.parse() + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.floating_ips.with_streaming_response.create( + droplet_id=2457247, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = await response.parse() + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + floating_ip = await async_client.gpu_droplets.floating_ips.create( + region="nyc3", + ) + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + floating_ip = await async_client.gpu_droplets.floating_ips.create( + region="nyc3", + project_id="746c6152-2fa2-11ed-92d3-27aaa54e4988", + ) + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.with_raw_response.create( + region="nyc3", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = await 
response.parse() + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.floating_ips.with_streaming_response.create( + region="nyc3", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = await response.parse() + assert_matches_type(FloatingIPCreateResponse, floating_ip, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + floating_ip = await async_client.gpu_droplets.floating_ips.retrieve( + "192.168.1.1", + ) + assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.with_raw_response.retrieve( + "192.168.1.1", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = await response.parse() + assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.floating_ips.with_streaming_response.retrieve( + "192.168.1.1", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = await response.parse() + assert_matches_type(FloatingIPRetrieveResponse, floating_ip, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, 
async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + await async_client.gpu_droplets.floating_ips.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + floating_ip = await async_client.gpu_droplets.floating_ips.list() + assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + floating_ip = await async_client.gpu_droplets.floating_ips.list( + page=1, + per_page=1, + ) + assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = await response.parse() + assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.floating_ips.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = await response.parse() + assert_matches_type(FloatingIPListResponse, floating_ip, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + floating_ip = await async_client.gpu_droplets.floating_ips.delete( + "192.168.1.1", + ) + assert floating_ip is None + + @pytest.mark.skip() + @parametrize + 
async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.floating_ips.with_raw_response.delete( + "192.168.1.1", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + floating_ip = await response.parse() + assert floating_ip is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.floating_ips.with_streaming_response.delete( + "192.168.1.1", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + floating_ip = await response.parse() + assert floating_ip is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `floating_ip` but received ''"): + await async_client.gpu_droplets.floating_ips.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/gpu_droplets/test_images.py b/tests/api_resources/gpu_droplets/test_images.py new file mode 100644 index 00000000..7be6a786 --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_images.py @@ -0,0 +1,417 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import ( + ImageListResponse, + ImageCreateResponse, + ImageUpdateResponse, + ImageRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestImages: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + image = client.gpu_droplets.images.create() + assert_matches_type(ImageCreateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + image = client.gpu_droplets.images.create( + description=" ", + distribution="Ubuntu", + name="Nifty New Snapshot", + region="nyc3", + tags=["base-image", "prod"], + url="http://cloud-images.ubuntu.com/minimal/releases/bionic/release/ubuntu-18.04-minimal-cloudimg-amd64.img", + ) + assert_matches_type(ImageCreateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = response.parse() + assert_matches_type(ImageCreateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.gpu_droplets.images.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = response.parse() + assert_matches_type(ImageCreateResponse, image, path=["response"]) + 
+ assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + image = client.gpu_droplets.images.retrieve( + 0, + ) + assert_matches_type(ImageRetrieveResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.with_raw_response.retrieve( + 0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = response.parse() + assert_matches_type(ImageRetrieveResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.images.with_streaming_response.retrieve( + 0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = response.parse() + assert_matches_type(ImageRetrieveResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_update(self, client: GradientAI) -> None: + image = client.gpu_droplets.images.update( + image_id=62137902, + ) + assert_matches_type(ImageUpdateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params(self, client: GradientAI) -> None: + image = client.gpu_droplets.images.update( + image_id=62137902, + description=" ", + distribution="Ubuntu", + name="Nifty New Snapshot", + ) + assert_matches_type(ImageUpdateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.with_raw_response.update( + image_id=62137902, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == 
"python" + image = response.parse() + assert_matches_type(ImageUpdateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update(self, client: GradientAI) -> None: + with client.gpu_droplets.images.with_streaming_response.update( + image_id=62137902, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = response.parse() + assert_matches_type(ImageUpdateResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + image = client.gpu_droplets.images.list() + assert_matches_type(ImageListResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + image = client.gpu_droplets.images.list( + page=1, + per_page=1, + private=True, + tag_name="tag_name", + type="application", + ) + assert_matches_type(ImageListResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = response.parse() + assert_matches_type(ImageListResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.images.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = response.parse() + assert_matches_type(ImageListResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) 
-> None: + image = client.gpu_droplets.images.delete( + 0, + ) + assert image is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.images.with_raw_response.delete( + 0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = response.parse() + assert image is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.images.with_streaming_response.delete( + 0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = response.parse() + assert image is None + + assert cast(Any, response.is_closed) is True + + +class TestAsyncImages: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + image = await async_client.gpu_droplets.images.create() + assert_matches_type(ImageCreateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + image = await async_client.gpu_droplets.images.create( + description=" ", + distribution="Ubuntu", + name="Nifty New Snapshot", + region="nyc3", + tags=["base-image", "prod"], + url="http://cloud-images.ubuntu.com/minimal/releases/bionic/release/ubuntu-18.04-minimal-cloudimg-amd64.img", + ) + assert_matches_type(ImageCreateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.with_raw_response.create() + + assert response.is_closed is True + 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = await response.parse() + assert_matches_type(ImageCreateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.images.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = await response.parse() + assert_matches_type(ImageCreateResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + image = await async_client.gpu_droplets.images.retrieve( + 0, + ) + assert_matches_type(ImageRetrieveResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.with_raw_response.retrieve( + 0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = await response.parse() + assert_matches_type(ImageRetrieveResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.images.with_streaming_response.retrieve( + 0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = await response.parse() + assert_matches_type(ImageRetrieveResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_update(self, async_client: AsyncGradientAI) -> None: + image = await 
async_client.gpu_droplets.images.update( + image_id=62137902, + ) + assert_matches_type(ImageUpdateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: + image = await async_client.gpu_droplets.images.update( + image_id=62137902, + description=" ", + distribution="Ubuntu", + name="Nifty New Snapshot", + ) + assert_matches_type(ImageUpdateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.with_raw_response.update( + image_id=62137902, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = await response.parse() + assert_matches_type(ImageUpdateResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.images.with_streaming_response.update( + image_id=62137902, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = await response.parse() + assert_matches_type(ImageUpdateResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + image = await async_client.gpu_droplets.images.list() + assert_matches_type(ImageListResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + image = await async_client.gpu_droplets.images.list( + page=1, + per_page=1, + private=True, + tag_name="tag_name", + type="application", + ) + assert_matches_type(ImageListResponse, image, 
path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = await response.parse() + assert_matches_type(ImageListResponse, image, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.images.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = await response.parse() + assert_matches_type(ImageListResponse, image, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + image = await async_client.gpu_droplets.images.delete( + 0, + ) + assert image is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.images.with_raw_response.delete( + 0, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + image = await response.parse() + assert image is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.images.with_streaming_response.delete( + 0, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + image = await response.parse() + assert image is None + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/test_load_balancers.py 
b/tests/api_resources/gpu_droplets/test_load_balancers.py new file mode 100644 index 00000000..c1ce1ce2 --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_load_balancers.py @@ -0,0 +1,1443 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import ( + LoadBalancerListResponse, + LoadBalancerCreateResponse, + LoadBalancerUpdateResponse, + LoadBalancerRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestLoadBalancers: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_1(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "tls_passthrough": False, + } + ], + algorithm="round_robin", + disable_lets_encrypt_dns_records=True, + domains=[ + { + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "is_managed": True, + "name": "example.com", + } + ], + droplet_ids=[3164444, 3164445], + enable_backend_keepalive=True, + enable_proxy_protocol=True, + firewall={ + "allow": ["ip:1.2.3.4", 
"cidr:2.3.0.0/16"], + "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + }, + glb_settings={ + "cdn": {"is_enabled": True}, + "failover_threshold": 50, + "region_priorities": { + "nyc1": 1, + "fra1": 2, + "sgp1": 3, + }, + "target_port": 80, + "target_protocol": "http", + }, + health_check={ + "check_interval_seconds": 10, + "healthy_threshold": 3, + "path": "/", + "port": 80, + "protocol": "http", + "response_timeout_seconds": 5, + "unhealthy_threshold": 5, + }, + http_idle_timeout_seconds=90, + name="example-lb-01", + network="EXTERNAL", + network_stack="IPV4", + project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + redirect_http_to_https=True, + region="nyc3", + size="lb-small", + size_unit=3, + sticky_sessions={ + "cookie_name": "DO-LB", + "cookie_ttl_seconds": 300, + "type": "cookies", + }, + target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], + tls_cipher_policy="STRONG", + type="REGIONAL", + vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", + ) + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.with_raw_response.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = response.parse() + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.with_streaming_response.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as 
response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = response.parse() + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_2(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "tls_passthrough": False, + } + ], + algorithm="round_robin", + disable_lets_encrypt_dns_records=True, + domains=[ + { + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "is_managed": True, + "name": "example.com", + } + ], + enable_backend_keepalive=True, + enable_proxy_protocol=True, + firewall={ + "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + }, + glb_settings={ + "cdn": {"is_enabled": True}, + "failover_threshold": 50, + "region_priorities": { + "nyc1": 1, + "fra1": 2, + "sgp1": 3, + }, + "target_port": 80, + "target_protocol": "http", + }, + health_check={ + "check_interval_seconds": 10, + "healthy_threshold": 3, + "path": "/", + "port": 80, + "protocol": "http", + "response_timeout_seconds": 5, + "unhealthy_threshold": 5, + }, + http_idle_timeout_seconds=90, + name="example-lb-01", + network="EXTERNAL", + network_stack="IPV4", + 
project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + redirect_http_to_https=True, + region="nyc3", + size="lb-small", + size_unit=3, + sticky_sessions={ + "cookie_name": "DO-LB", + "cookie_ttl_seconds": 300, + "type": "cookies", + }, + tag="prod:web", + target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], + tls_cipher_policy="STRONG", + type="REGIONAL", + vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", + ) + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.with_raw_response.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = response.parse() + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.with_streaming_response.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = response.parse() + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.retrieve( + "lb_id", + ) + assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) + 
+ @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.with_raw_response.retrieve( + "lb_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = response.parse() + assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.with_streaming_response.retrieve( + "lb_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = response.parse() + assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + client.gpu_droplets.load_balancers.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_update_overload_1(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params_overload_1(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": 
"http", + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "tls_passthrough": False, + } + ], + algorithm="round_robin", + disable_lets_encrypt_dns_records=True, + domains=[ + { + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "is_managed": True, + "name": "example.com", + } + ], + droplet_ids=[3164444, 3164445], + enable_backend_keepalive=True, + enable_proxy_protocol=True, + firewall={ + "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + }, + glb_settings={ + "cdn": {"is_enabled": True}, + "failover_threshold": 50, + "region_priorities": { + "nyc1": 1, + "fra1": 2, + "sgp1": 3, + }, + "target_port": 80, + "target_protocol": "http", + }, + health_check={ + "check_interval_seconds": 10, + "healthy_threshold": 3, + "path": "/", + "port": 80, + "protocol": "http", + "response_timeout_seconds": 5, + "unhealthy_threshold": 5, + }, + http_idle_timeout_seconds=90, + name="example-lb-01", + network="EXTERNAL", + network_stack="IPV4", + project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + redirect_http_to_https=True, + region="nyc3", + size="lb-small", + size_unit=3, + sticky_sessions={ + "cookie_name": "DO-LB", + "cookie_ttl_seconds": 300, + "type": "cookies", + }, + target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], + tls_cipher_policy="STRONG", + type="REGIONAL", + vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", + ) + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.with_raw_response.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = response.parse() + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.with_streaming_response.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = response.parse() + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update_overload_1(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + client.gpu_droplets.load_balancers.with_raw_response.update( + lb_id="", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + @pytest.mark.skip() + @parametrize + def test_method_update_overload_2(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_update_with_all_params_overload_2(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + 
"entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "tls_passthrough": False, + } + ], + algorithm="round_robin", + disable_lets_encrypt_dns_records=True, + domains=[ + { + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "is_managed": True, + "name": "example.com", + } + ], + enable_backend_keepalive=True, + enable_proxy_protocol=True, + firewall={ + "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + }, + glb_settings={ + "cdn": {"is_enabled": True}, + "failover_threshold": 50, + "region_priorities": { + "nyc1": 1, + "fra1": 2, + "sgp1": 3, + }, + "target_port": 80, + "target_protocol": "http", + }, + health_check={ + "check_interval_seconds": 10, + "healthy_threshold": 3, + "path": "/", + "port": 80, + "protocol": "http", + "response_timeout_seconds": 5, + "unhealthy_threshold": 5, + }, + http_idle_timeout_seconds=90, + name="example-lb-01", + network="EXTERNAL", + network_stack="IPV4", + project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + redirect_http_to_https=True, + region="nyc3", + size="lb-small", + size_unit=3, + sticky_sessions={ + "cookie_name": "DO-LB", + "cookie_ttl_seconds": 300, + "type": "cookies", + }, + tag="prod:web", + target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], + tls_cipher_policy="STRONG", + type="REGIONAL", + vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", + ) + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_update_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.with_raw_response.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert 
response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = response.parse() + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_update_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.with_streaming_response.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = response.parse() + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_update_overload_2(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + client.gpu_droplets.load_balancers.with_raw_response.update( + lb_id="", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.list() + assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.list( + page=1, + per_page=1, + ) + assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = 
client.gpu_droplets.load_balancers.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = response.parse() + assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = response.parse() + assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.delete( + "lb_id", + ) + assert load_balancer is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.with_raw_response.delete( + "lb_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = response.parse() + assert load_balancer is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.with_streaming_response.delete( + "lb_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = response.parse() + assert load_balancer is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + 
client.gpu_droplets.load_balancers.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete_cache(self, client: GradientAI) -> None: + load_balancer = client.gpu_droplets.load_balancers.delete_cache( + "lb_id", + ) + assert load_balancer is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete_cache(self, client: GradientAI) -> None: + response = client.gpu_droplets.load_balancers.with_raw_response.delete_cache( + "lb_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = response.parse() + assert load_balancer is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete_cache(self, client: GradientAI) -> None: + with client.gpu_droplets.load_balancers.with_streaming_response.delete_cache( + "lb_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = response.parse() + assert load_balancer is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete_cache(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + client.gpu_droplets.load_balancers.with_raw_response.delete_cache( + "", + ) + + +class TestAsyncLoadBalancers: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert_matches_type(LoadBalancerCreateResponse, 
load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "tls_passthrough": False, + } + ], + algorithm="round_robin", + disable_lets_encrypt_dns_records=True, + domains=[ + { + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "is_managed": True, + "name": "example.com", + } + ], + droplet_ids=[3164444, 3164445], + enable_backend_keepalive=True, + enable_proxy_protocol=True, + firewall={ + "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + }, + glb_settings={ + "cdn": {"is_enabled": True}, + "failover_threshold": 50, + "region_priorities": { + "nyc1": 1, + "fra1": 2, + "sgp1": 3, + }, + "target_port": 80, + "target_protocol": "http", + }, + health_check={ + "check_interval_seconds": 10, + "healthy_threshold": 3, + "path": "/", + "port": 80, + "protocol": "http", + "response_timeout_seconds": 5, + "unhealthy_threshold": 5, + }, + http_idle_timeout_seconds=90, + name="example-lb-01", + network="EXTERNAL", + network_stack="IPV4", + project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + redirect_http_to_https=True, + region="nyc3", + size="lb-small", + size_unit=3, + sticky_sessions={ + "cookie_name": "DO-LB", + "cookie_ttl_seconds": 300, + "type": "cookies", + }, + target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], + tls_cipher_policy="STRONG", + type="REGIONAL", + vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", + ) + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_1(self, 
async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.with_raw_response.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = await response.parse() + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.with_streaming_response.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = await response.parse() + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + 
"tls_passthrough": False, + } + ], + algorithm="round_robin", + disable_lets_encrypt_dns_records=True, + domains=[ + { + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "is_managed": True, + "name": "example.com", + } + ], + enable_backend_keepalive=True, + enable_proxy_protocol=True, + firewall={ + "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + }, + glb_settings={ + "cdn": {"is_enabled": True}, + "failover_threshold": 50, + "region_priorities": { + "nyc1": 1, + "fra1": 2, + "sgp1": 3, + }, + "target_port": 80, + "target_protocol": "http", + }, + health_check={ + "check_interval_seconds": 10, + "healthy_threshold": 3, + "path": "/", + "port": 80, + "protocol": "http", + "response_timeout_seconds": 5, + "unhealthy_threshold": 5, + }, + http_idle_timeout_seconds=90, + name="example-lb-01", + network="EXTERNAL", + network_stack="IPV4", + project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + redirect_http_to_https=True, + region="nyc3", + size="lb-small", + size_unit=3, + sticky_sessions={ + "cookie_name": "DO-LB", + "cookie_ttl_seconds": 300, + "type": "cookies", + }, + tag="prod:web", + target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], + tls_cipher_policy="STRONG", + type="REGIONAL", + vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", + ) + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.with_raw_response.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = await response.parse() + 
assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.with_streaming_response.create( + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = await response.parse() + assert_matches_type(LoadBalancerCreateResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.retrieve( + "lb_id", + ) + assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.with_raw_response.retrieve( + "lb_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = await response.parse() + assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.with_streaming_response.retrieve( + "lb_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = await response.parse() + assert_matches_type(LoadBalancerRetrieveResponse, load_balancer, path=["response"]) + + assert 
cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update_overload_1(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "tls_passthrough": False, + } + ], + algorithm="round_robin", + disable_lets_encrypt_dns_records=True, + domains=[ + { + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "is_managed": True, + "name": "example.com", + } + ], + droplet_ids=[3164444, 3164445], + enable_backend_keepalive=True, + enable_proxy_protocol=True, + firewall={ + "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + }, + glb_settings={ + "cdn": {"is_enabled": True}, + "failover_threshold": 50, + "region_priorities": { + "nyc1": 1, + "fra1": 2, + "sgp1": 3, + }, + "target_port": 80, + "target_protocol": "http", + }, + health_check={ + "check_interval_seconds": 10, + "healthy_threshold": 3, + "path": "/", 
+ "port": 80, + "protocol": "http", + "response_timeout_seconds": 5, + "unhealthy_threshold": 5, + }, + http_idle_timeout_seconds=90, + name="example-lb-01", + network="EXTERNAL", + network_stack="IPV4", + project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + redirect_http_to_https=True, + region="nyc3", + size="lb-small", + size_unit=3, + sticky_sessions={ + "cookie_name": "DO-LB", + "cookie_ttl_seconds": 300, + "type": "cookies", + }, + target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], + tls_cipher_policy="STRONG", + type="REGIONAL", + vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", + ) + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.with_raw_response.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = await response.parse() + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.with_streaming_response.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = await response.parse() + 
assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update_overload_1(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.with_raw_response.update( + lb_id="", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + @pytest.mark.skip() + @parametrize + async def test_method_update_overload_2(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_update_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "tls_passthrough": False, + } + ], + algorithm="round_robin", + disable_lets_encrypt_dns_records=True, + domains=[ + { + "certificate_id": "892071a0-bb95-49bc-8021-3afd67a210bf", + "is_managed": True, + "name": "example.com", + } + ], + enable_backend_keepalive=True, + enable_proxy_protocol=True, + firewall={ + "allow": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + "deny": ["ip:1.2.3.4", "cidr:2.3.0.0/16"], + }, + glb_settings={ + "cdn": {"is_enabled": True}, + "failover_threshold": 50, + 
"region_priorities": { + "nyc1": 1, + "fra1": 2, + "sgp1": 3, + }, + "target_port": 80, + "target_protocol": "http", + }, + health_check={ + "check_interval_seconds": 10, + "healthy_threshold": 3, + "path": "/", + "port": 80, + "protocol": "http", + "response_timeout_seconds": 5, + "unhealthy_threshold": 5, + }, + http_idle_timeout_seconds=90, + name="example-lb-01", + network="EXTERNAL", + network_stack="IPV4", + project_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + redirect_http_to_https=True, + region="nyc3", + size="lb-small", + size_unit=3, + sticky_sessions={ + "cookie_name": "DO-LB", + "cookie_ttl_seconds": 300, + "type": "cookies", + }, + tag="prod:web", + target_load_balancer_ids=["7dbf91fe-cbdb-48dc-8290-c3a181554905", "996fa239-fac3-42a2-b9a1-9fa822268b7a"], + tls_cipher_policy="STRONG", + type="REGIONAL", + vpc_uuid="c33931f2-a26a-4e61-b85c-4e95a2ec431b", + ) + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_update_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.with_raw_response.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = await response.parse() + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_update_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.with_streaming_response.update( + lb_id="4de7ac8b-495b-4884-9a69-1050c6793cd6", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": 
"http", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = await response.parse() + assert_matches_type(LoadBalancerUpdateResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_update_overload_2(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.with_raw_response.update( + lb_id="", + forwarding_rules=[ + { + "entry_port": 443, + "entry_protocol": "https", + "target_port": 80, + "target_protocol": "http", + } + ], + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.list() + assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.list( + page=1, + per_page=1, + ) + assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = await response.parse() + assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with 
async_client.gpu_droplets.load_balancers.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = await response.parse() + assert_matches_type(LoadBalancerListResponse, load_balancer, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.delete( + "lb_id", + ) + assert load_balancer is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.with_raw_response.delete( + "lb_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = await response.parse() + assert load_balancer is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.with_streaming_response.delete( + "lb_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = await response.parse() + assert load_balancer is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete_cache(self, async_client: AsyncGradientAI) -> None: + load_balancer = await async_client.gpu_droplets.load_balancers.delete_cache( + "lb_id", + 
) + assert load_balancer is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete_cache(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.load_balancers.with_raw_response.delete_cache( + "lb_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + load_balancer = await response.parse() + assert load_balancer is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete_cache(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.load_balancers.with_streaming_response.delete_cache( + "lb_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + load_balancer = await response.parse() + assert load_balancer is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete_cache(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `lb_id` but received ''"): + await async_client.gpu_droplets.load_balancers.with_raw_response.delete_cache( + "", + ) diff --git a/tests/api_resources/gpu_droplets/test_sizes.py b/tests/api_resources/gpu_droplets/test_sizes.py new file mode 100644 index 00000000..eda73b1e --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_sizes.py @@ -0,0 +1,98 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import SizeListResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestSizes: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + size = client.gpu_droplets.sizes.list() + assert_matches_type(SizeListResponse, size, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + size = client.gpu_droplets.sizes.list( + page=1, + per_page=1, + ) + assert_matches_type(SizeListResponse, size, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.sizes.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + size = response.parse() + assert_matches_type(SizeListResponse, size, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.sizes.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + size = response.parse() + assert_matches_type(SizeListResponse, size, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncSizes: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: 
AsyncGradientAI) -> None: + size = await async_client.gpu_droplets.sizes.list() + assert_matches_type(SizeListResponse, size, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + size = await async_client.gpu_droplets.sizes.list( + page=1, + per_page=1, + ) + assert_matches_type(SizeListResponse, size, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.sizes.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + size = await response.parse() + assert_matches_type(SizeListResponse, size, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.sizes.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + size = await response.parse() + assert_matches_type(SizeListResponse, size, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/test_snapshots.py b/tests/api_resources/gpu_droplets/test_snapshots.py new file mode 100644 index 00000000..5d7132c2 --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_snapshots.py @@ -0,0 +1,236 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import SnapshotListResponse, SnapshotRetrieveResponse + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestSnapshots: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.snapshots.retrieve( + 6372321, + ) + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.snapshots.with_raw_response.retrieve( + 6372321, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.snapshots.with_streaming_response.retrieve( + 6372321, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.snapshots.list() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + snapshot = 
client.gpu_droplets.snapshots.list( + page=1, + per_page=1, + resource_type="droplet", + ) + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.snapshots.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.snapshots.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.snapshots.delete( + 6372321, + ) + assert snapshot is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.snapshots.with_raw_response.delete( + 6372321, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = response.parse() + assert snapshot is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.snapshots.with_streaming_response.delete( + 6372321, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = response.parse() + assert snapshot is None + + assert cast(Any, response.is_closed) is True + + +class 
TestAsyncSnapshots: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.snapshots.retrieve( + 6372321, + ) + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.snapshots.with_raw_response.retrieve( + 6372321, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = await response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.snapshots.with_streaming_response.retrieve( + 6372321, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = await response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.snapshots.list() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.snapshots.list( + page=1, + per_page=1, + resource_type="droplet", + ) + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + 
@pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.snapshots.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = await response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.snapshots.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = await response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.snapshots.delete( + 6372321, + ) + assert snapshot is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.snapshots.with_raw_response.delete( + 6372321, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = await response.parse() + assert snapshot is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.snapshots.with_streaming_response.delete( + 6372321, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = await response.parse() + assert snapshot is None + + assert cast(Any, response.is_closed) is True diff --git 
a/tests/api_resources/gpu_droplets/test_volumes.py b/tests/api_resources/gpu_droplets/test_volumes.py new file mode 100644 index 00000000..64bcb4c5 --- /dev/null +++ b/tests/api_resources/gpu_droplets/test_volumes.py @@ -0,0 +1,568 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets import ( + VolumeListResponse, + VolumeCreateResponse, + VolumeRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestVolumes: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_1(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.create( + name="example", + region="nyc3", + size_gigabytes=10, + description="Block store for examples", + filesystem_label="example", + filesystem_type="ext4", + snapshot_id="b0798135-fb76-11eb-946a-0a58ac146f33", + tags=["base-image", "prod"], + ) + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.with_raw_response.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = 
response.parse() + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.with_streaming_response.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = response.parse() + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_2(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.create( + name="example", + region="nyc3", + size_gigabytes=10, + description="Block store for examples", + filesystem_label="example", + filesystem_type="ext4", + snapshot_id="b0798135-fb76-11eb-946a-0a58ac146f33", + tags=["base-image", "prod"], + ) + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.with_raw_response.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = response.parse() + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: + with 
client.gpu_droplets.volumes.with_streaming_response.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = response.parse() + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.with_raw_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = response.parse() + assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.with_streaming_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = response.parse() + assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + volume = 
client.gpu_droplets.volumes.list() + assert_matches_type(VolumeListResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.list( + name="name", + page=1, + per_page=1, + region="nyc3", + ) + assert_matches_type(VolumeListResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = response.parse() + assert_matches_type(VolumeListResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = response.parse() + assert_matches_type(VolumeListResponse, volume, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert volume is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.with_raw_response.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = response.parse() + assert volume is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.with_streaming_response.delete( + 
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = response.parse() + assert volume is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete_by_name(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.delete_by_name() + assert volume is None + + @pytest.mark.skip() + @parametrize + def test_method_delete_by_name_with_all_params(self, client: GradientAI) -> None: + volume = client.gpu_droplets.volumes.delete_by_name( + name="name", + region="nyc3", + ) + assert volume is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete_by_name(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.with_raw_response.delete_by_name() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = response.parse() + assert volume is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete_by_name(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.with_streaming_response.delete_by_name() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = response.parse() + assert volume is None + + assert cast(Any, response.is_closed) is True + + +class TestAsyncVolumes: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def 
test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.create( + name="example", + region="nyc3", + size_gigabytes=10, + description="Block store for examples", + filesystem_label="example", + filesystem_type="ext4", + snapshot_id="b0798135-fb76-11eb-946a-0a58ac146f33", + tags=["base-image", "prod"], + ) + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.with_raw_response.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = await response.parse() + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.with_streaming_response.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = await response.parse() + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + volume = await 
async_client.gpu_droplets.volumes.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.create( + name="example", + region="nyc3", + size_gigabytes=10, + description="Block store for examples", + filesystem_label="example", + filesystem_type="ext4", + snapshot_id="b0798135-fb76-11eb-946a-0a58ac146f33", + tags=["base-image", "prod"], + ) + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.with_raw_response.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = await response.parse() + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.with_streaming_response.create( + name="example", + region="nyc3", + size_gigabytes=10, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = await response.parse() + assert_matches_type(VolumeCreateResponse, volume, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + 
assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.with_raw_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = await response.parse() + assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.with_streaming_response.retrieve( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = await response.parse() + assert_matches_type(VolumeRetrieveResponse, volume, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await async_client.gpu_droplets.volumes.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.list() + assert_matches_type(VolumeListResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.list( + name="name", + page=1, + per_page=1, + region="nyc3", + ) + assert_matches_type(VolumeListResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def 
test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = await response.parse() + assert_matches_type(VolumeListResponse, volume, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = await response.parse() + assert_matches_type(VolumeListResponse, volume, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + assert volume is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.with_raw_response.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = await response.parse() + assert volume is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.with_streaming_response.delete( + "182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = await response.parse() + assert volume is None + + assert cast(Any, response.is_closed) is True + + 
@pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await async_client.gpu_droplets.volumes.with_raw_response.delete( + "", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_delete_by_name(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.delete_by_name() + assert volume is None + + @pytest.mark.skip() + @parametrize + async def test_method_delete_by_name_with_all_params(self, async_client: AsyncGradientAI) -> None: + volume = await async_client.gpu_droplets.volumes.delete_by_name( + name="name", + region="nyc3", + ) + assert volume is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete_by_name(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.with_raw_response.delete_by_name() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + volume = await response.parse() + assert volume is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete_by_name(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.with_streaming_response.delete_by_name() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + volume = await response.parse() + assert volume is None + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/volumes/__init__.py b/tests/api_resources/gpu_droplets/volumes/__init__.py new file mode 100644 index 00000000..fd8019a9 --- /dev/null +++ b/tests/api_resources/gpu_droplets/volumes/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
diff --git a/tests/api_resources/gpu_droplets/volumes/test_actions.py b/tests/api_resources/gpu_droplets/volumes/test_actions.py new file mode 100644 index 00000000..d5338c97 --- /dev/null +++ b/tests/api_resources/gpu_droplets/volumes/test_actions.py @@ -0,0 +1,825 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets.volumes import ( + ActionListResponse, + ActionRetrieveResponse, + ActionInitiateByIDResponse, + ActionInitiateByNameResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestActions: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.retrieve( + action_id=36804636, + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve_with_all_params(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.retrieve( + action_id=36804636, + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + page=1, + per_page=1, + ) + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.actions.with_raw_response.retrieve( + action_id=36804636, + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + 
assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.actions.with_streaming_response.retrieve( + action_id=36804636, + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.actions.with_raw_response.retrieve( + action_id=36804636, + volume_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + page=1, + per_page=1, + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.actions.with_raw_response.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize 
+ def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.actions.with_streaming_response.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.actions.with_raw_response.list( + volume_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_id_overload_1(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_id_with_all_params_overload_1(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + page=1, + per_page=1, + region="nyc3", + tags=["base-image", "prod"], + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_by_id_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + 
assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_by_id_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_initiate_by_id_overload_1(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="", + droplet_id=11612190, + type="attach", + ) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_id_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_id_with_all_params_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + page=1, + per_page=1, + region="nyc3", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_by_id_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + 
volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_by_id_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_initiate_by_id_overload_2(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="", + droplet_id=11612190, + type="attach", + ) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_id_overload_3(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + size_gigabytes=16384, + type="attach", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_id_with_all_params_overload_3(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + size_gigabytes=16384, + type="attach", + page=1, + per_page=1, + region="nyc3", + ) + assert_matches_type(ActionInitiateByIDResponse, 
action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_by_id_overload_3(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + size_gigabytes=16384, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_by_id_overload_3(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + size_gigabytes=16384, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_initiate_by_id_overload_3(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="", + size_gigabytes=16384, + type="attach", + ) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_name_overload_1(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_name( + droplet_id=11612190, + type="attach", + ) + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_name_with_all_params_overload_1(self, client: GradientAI) -> None: + action = 
client.gpu_droplets.volumes.actions.initiate_by_name( + droplet_id=11612190, + type="attach", + page=1, + per_page=1, + region="nyc3", + tags=["base-image", "prod"], + ) + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_initiate_by_name_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name( + droplet_id=11612190, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_by_name_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name( + droplet_id=11612190, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_name_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_name( + droplet_id=11612190, + type="attach", + ) + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_initiate_by_name_with_all_params_overload_2(self, client: GradientAI) -> None: + action = client.gpu_droplets.volumes.actions.initiate_by_name( + droplet_id=11612190, + type="attach", + page=1, + per_page=1, + region="nyc3", + ) + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_raw_response_initiate_by_name_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name( + droplet_id=11612190, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = response.parse() + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_initiate_by_name_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name( + droplet_id=11612190, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = response.parse() + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncActions: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.retrieve( + action_id=36804636, + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve_with_all_params(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.retrieve( + action_id=36804636, + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + page=1, + per_page=1, + ) + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + 
response = await async_client.gpu_droplets.volumes.actions.with_raw_response.retrieve( + action_id=36804636, + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.actions.with_streaming_response.retrieve( + action_id=36804636, + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionRetrieveResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await async_client.gpu_droplets.volumes.actions.with_raw_response.retrieve( + action_id=36804636, + volume_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + page=1, + per_page=1, + ) + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + 
async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.actions.with_raw_response.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.actions.with_streaming_response.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionListResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await async_client.gpu_droplets.volumes.actions.with_raw_response.list( + volume_id="", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_id_overload_1(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_id_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + 
page=1, + per_page=1, + region="nyc3", + tags=["base-image", "prod"], + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_by_id_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_by_id_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_initiate_by_id_overload_1(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="", + droplet_id=11612190, + type="attach", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_id_overload_2(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, 
+ type="attach", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_id_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + page=1, + per_page=1, + region="nyc3", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_by_id_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_by_id_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + droplet_id=11612190, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_initiate_by_id_overload_2(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await 
async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="", + droplet_id=11612190, + type="attach", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_id_overload_3(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + size_gigabytes=16384, + type="attach", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_id_with_all_params_overload_3(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + size_gigabytes=16384, + type="attach", + page=1, + per_page=1, + region="nyc3", + ) + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_by_id_overload_3(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + size_gigabytes=16384, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_by_id_overload_3(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_id( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + size_gigabytes=16384, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await 
response.parse() + assert_matches_type(ActionInitiateByIDResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_initiate_by_id_overload_3(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_id( + volume_id="", + size_gigabytes=16384, + type="attach", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_name_overload_1(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_name( + droplet_id=11612190, + type="attach", + ) + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_name_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_name( + droplet_id=11612190, + type="attach", + page=1, + per_page=1, + region="nyc3", + tags=["base-image", "prod"], + ) + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_by_name_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name( + droplet_id=11612190, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_by_name_overload_1(self, async_client: AsyncGradientAI) -> None: + async with 
async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name( + droplet_id=11612190, + type="attach", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_name_overload_2(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_name( + droplet_id=11612190, + type="attach", + ) + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_initiate_by_name_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + action = await async_client.gpu_droplets.volumes.actions.initiate_by_name( + droplet_id=11612190, + type="attach", + page=1, + per_page=1, + region="nyc3", + ) + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_initiate_by_name_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.actions.with_raw_response.initiate_by_name( + droplet_id=11612190, + type="attach", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + action = await response.parse() + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_initiate_by_name_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.actions.with_streaming_response.initiate_by_name( + droplet_id=11612190, + type="attach", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + action = await response.parse() + assert_matches_type(ActionInitiateByNameResponse, action, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/gpu_droplets/volumes/test_snapshots.py b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py new file mode 100644 index 00000000..8b72305c --- /dev/null +++ b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py @@ -0,0 +1,412 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types.gpu_droplets.volumes import ( + SnapshotListResponse, + SnapshotCreateResponse, + SnapshotRetrieveResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestSnapshots: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.volumes.snapshots.create( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + name="big-data-snapshot1475261774", + ) + assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.volumes.snapshots.create( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + name="big-data-snapshot1475261774", + tags=["base-image", "prod"], + ) + assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create(self, client: GradientAI) -> None: + response = 
client.gpu_droplets.volumes.snapshots.with_raw_response.create( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + name="big-data-snapshot1475261774", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = response.parse() + assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.snapshots.with_streaming_response.create( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + name="big-data-snapshot1475261774", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = response.parse() + assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_create(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.snapshots.with_raw_response.create( + volume_id="", + name="big-data-snapshot1475261774", + ) + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.volumes.snapshots.retrieve( + "snapshot_id", + ) + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve( + "snapshot_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.snapshots.with_streaming_response.retrieve( + "snapshot_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"): + client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.volumes.snapshots.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.volumes.snapshots.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + page=1, + per_page=1, + ) + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.snapshots.with_raw_response.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.snapshots.with_streaming_response.list( + 
volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_list(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + client.gpu_droplets.volumes.snapshots.with_raw_response.list( + volume_id="", + ) + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + snapshot = client.gpu_droplets.volumes.snapshots.delete( + "snapshot_id", + ) + assert snapshot is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.volumes.snapshots.with_raw_response.delete( + "snapshot_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = response.parse() + assert snapshot is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.volumes.snapshots.with_streaming_response.delete( + "snapshot_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = response.parse() + assert snapshot is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_delete(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"): + client.gpu_droplets.volumes.snapshots.with_raw_response.delete( + "", + ) + + +class TestAsyncSnapshots: + parametrize = pytest.mark.parametrize( + 
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.volumes.snapshots.create( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + name="big-data-snapshot1475261774", + ) + assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.volumes.snapshots.create( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + name="big-data-snapshot1475261774", + tags=["base-image", "prod"], + ) + assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.create( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + name="big-data-snapshot1475261774", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = await response.parse() + assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.create( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + name="big-data-snapshot1475261774", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = await response.parse() + assert_matches_type(SnapshotCreateResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + 
@pytest.mark.skip() + @parametrize + async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await async_client.gpu_droplets.volumes.snapshots.with_raw_response.create( + volume_id="", + name="big-data-snapshot1475261774", + ) + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.volumes.snapshots.retrieve( + "snapshot_id", + ) + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve( + "snapshot_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = await response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.retrieve( + "snapshot_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = await response.parse() + assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"): + await async_client.gpu_droplets.volumes.snapshots.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + async 
def test_method_list(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.volumes.snapshots.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.volumes.snapshots.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + page=1, + per_page=1, + ) + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = await response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.list( + volume_id="7724db7c-e098-11e5-b522-000f53304e51", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = await response.parse() + assert_matches_type(SnapshotListResponse, snapshot, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `volume_id` but received ''"): + await async_client.gpu_droplets.volumes.snapshots.with_raw_response.list( + volume_id="", + ) + + @pytest.mark.skip() + 
@parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + snapshot = await async_client.gpu_droplets.volumes.snapshots.delete( + "snapshot_id", + ) + assert snapshot is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.volumes.snapshots.with_raw_response.delete( + "snapshot_id", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + snapshot = await response.parse() + assert snapshot is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.volumes.snapshots.with_streaming_response.delete( + "snapshot_id", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + snapshot = await response.parse() + assert snapshot is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `snapshot_id` but received ''"): + await async_client.gpu_droplets.volumes.snapshots.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/inference/test_api_keys.py b/tests/api_resources/inference/test_api_keys.py index 90bf95b9..157a2e3d 100644 --- a/tests/api_resources/inference/test_api_keys.py +++ b/tests/api_resources/inference/test_api_keys.py @@ -33,7 +33,7 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: api_key = client.inference.api_keys.create( - name="name", + name="Production Key", ) assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -63,7 +63,7 @@ def 
test_streaming_response_create(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: api_key = client.inference.api_keys.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -71,9 +71,9 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: api_key = client.inference.api_keys.update( - path_api_key_uuid="api_key_uuid", - body_api_key_uuid="api_key_uuid", - name="name", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -81,7 +81,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.inference.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -93,7 +93,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.inference.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -248,7 +248,7 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.inference.api_keys.create( - name="name", + name="Production Key", ) 
assert_matches_type(APIKeyCreateResponse, api_key, path=["response"]) @@ -278,7 +278,7 @@ async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.inference.api_keys.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -286,9 +286,9 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: api_key = await async_client.inference.api_keys.update( - path_api_key_uuid="api_key_uuid", - body_api_key_uuid="api_key_uuid", - name="name", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', ) assert_matches_type(APIKeyUpdateResponse, api_key, path=["response"]) @@ -296,7 +296,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.inference.api_keys.with_raw_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -308,7 +308,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.inference.api_keys.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git 
a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index 9c466e2f..55b056b8 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -25,7 +25,7 @@ class TestDataSources: @parametrize def test_method_create(self, client: GradientAI) -> None: data_source = client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid="knowledge_base_uuid", + path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) @@ -33,22 +33,22 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: data_source = client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid="knowledge_base_uuid", + path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', aws_data_source={ - "bucket_name": "bucket_name", - "item_path": "item_path", - "key_id": "key_id", - "region": "region", - "secret_key": "secret_key", + "bucket_name": '"example name"', + "item_path": '"example string"', + "key_id": '"123e4567-e89b-12d3-a456-426614174000"', + "region": '"example string"', + "secret_key": '"example string"', }, - body_knowledge_base_uuid="knowledge_base_uuid", + body_knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"', spaces_data_source={ - "bucket_name": "bucket_name", - "item_path": "item_path", - "region": "region", + "bucket_name": '"example name"', + "item_path": '"example string"', + "region": '"example string"', }, web_crawler_data_source={ - "base_url": "base_url", + "base_url": '"example string"', "crawling_option": "UNKNOWN", "embed_media": True, }, @@ -59,7 +59,7 @@ def test_method_create_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_create(self, client: GradientAI) -> None: response = 
client.knowledge_bases.data_sources.with_raw_response.create( - path_knowledge_base_uuid="knowledge_base_uuid", + path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -71,7 +71,7 @@ def test_raw_response_create(self, client: GradientAI) -> None: @parametrize def test_streaming_response_create(self, client: GradientAI) -> None: with client.knowledge_bases.data_sources.with_streaming_response.create( - path_knowledge_base_uuid="knowledge_base_uuid", + path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -95,7 +95,7 @@ def test_path_params_create(self, client: GradientAI) -> None: @parametrize def test_method_list(self, client: GradientAI) -> None: data_source = client.knowledge_bases.data_sources.list( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(DataSourceListResponse, data_source, path=["response"]) @@ -103,7 +103,7 @@ def test_method_list(self, client: GradientAI) -> None: @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: data_source = client.knowledge_bases.data_sources.list( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -113,7 +113,7 @@ def test_method_list_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_list(self, client: GradientAI) -> None: response = client.knowledge_bases.data_sources.with_raw_response.list( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -125,7 +125,7 @@ def test_raw_response_list(self, client: GradientAI) -> None: @parametrize def test_streaming_response_list(self, client: GradientAI) -> None: with 
client.knowledge_bases.data_sources.with_streaming_response.list( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -147,8 +147,8 @@ def test_path_params_list(self, client: GradientAI) -> None: @parametrize def test_method_delete(self, client: GradientAI) -> None: data_source = client.knowledge_bases.data_sources.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", + data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) @@ -156,8 +156,8 @@ def test_method_delete(self, client: GradientAI) -> None: @parametrize def test_raw_response_delete(self, client: GradientAI) -> None: response = client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", + data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -169,8 +169,8 @@ def test_raw_response_delete(self, client: GradientAI) -> None: @parametrize def test_streaming_response_delete(self, client: GradientAI) -> None: with client.knowledge_bases.data_sources.with_streaming_response.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", + data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -185,14 +185,14 @@ def test_streaming_response_delete(self, client: GradientAI) -> None: def test_path_params_delete(self, client: GradientAI) -> None: with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid="data_source_uuid", + data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', knowledge_base_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"): client.knowledge_bases.data_sources.with_raw_response.delete( data_source_uuid="", - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) @@ -205,7 +205,7 @@ class TestAsyncDataSources: @parametrize async def test_method_create(self, async_client: AsyncGradientAI) -> None: data_source = await async_client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid="knowledge_base_uuid", + path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(DataSourceCreateResponse, data_source, path=["response"]) @@ -213,22 +213,22 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: data_source = await async_client.knowledge_bases.data_sources.create( - path_knowledge_base_uuid="knowledge_base_uuid", + path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', aws_data_source={ - "bucket_name": "bucket_name", - "item_path": "item_path", - "key_id": "key_id", - "region": "region", - "secret_key": "secret_key", + "bucket_name": '"example name"', + "item_path": '"example string"', + "key_id": '"123e4567-e89b-12d3-a456-426614174000"', + "region": '"example string"', + "secret_key": '"example string"', }, - body_knowledge_base_uuid="knowledge_base_uuid", + body_knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"', spaces_data_source={ - "bucket_name": "bucket_name", - "item_path": "item_path", - "region": "region", + "bucket_name": '"example name"', + 
"item_path": '"example string"', + "region": '"example string"', }, web_crawler_data_source={ - "base_url": "base_url", + "base_url": '"example string"', "crawling_option": "UNKNOWN", "embed_media": True, }, @@ -239,7 +239,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: response = await async_client.knowledge_bases.data_sources.with_raw_response.create( - path_knowledge_base_uuid="knowledge_base_uuid", + path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -251,7 +251,7 @@ async def test_raw_response_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncGradientAI) -> None: async with async_client.knowledge_bases.data_sources.with_streaming_response.create( - path_knowledge_base_uuid="knowledge_base_uuid", + path_knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -275,7 +275,7 @@ async def test_path_params_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list(self, async_client: AsyncGradientAI) -> None: data_source = await async_client.knowledge_bases.data_sources.list( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(DataSourceListResponse, data_source, path=["response"]) @@ -283,7 +283,7 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: data_source = await async_client.knowledge_bases.data_sources.list( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, 
) @@ -293,7 +293,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) @parametrize async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: response = await async_client.knowledge_bases.data_sources.with_raw_response.list( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -305,7 +305,7 @@ async def test_raw_response_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: async with async_client.knowledge_bases.data_sources.with_streaming_response.list( - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -327,8 +327,8 @@ async def test_path_params_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_delete(self, async_client: AsyncGradientAI) -> None: data_source = await async_client.knowledge_bases.data_sources.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", + data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(DataSourceDeleteResponse, data_source, path=["response"]) @@ -336,8 +336,8 @@ async def test_method_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: response = await async_client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", + data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ 
-349,8 +349,8 @@ async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: async with async_client.knowledge_bases.data_sources.with_streaming_response.delete( - data_source_uuid="data_source_uuid", - knowledge_base_uuid="knowledge_base_uuid", + data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -365,12 +365,12 @@ async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"): await async_client.knowledge_bases.data_sources.with_raw_response.delete( - data_source_uuid="data_source_uuid", + data_source_uuid='"123e4567-e89b-12d3-a456-426614174000"', knowledge_base_uuid="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `data_source_uuid` but received ''"): await async_client.knowledge_bases.data_sources.with_raw_response.delete( data_source_uuid="", - knowledge_base_uuid="knowledge_base_uuid", + knowledge_base_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) diff --git a/tests/api_resources/knowledge_bases/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py index 8bf1829f..ed32d7f8 100644 --- a/tests/api_resources/knowledge_bases/test_indexing_jobs.py +++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py @@ -33,8 +33,8 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: indexing_job = client.knowledge_bases.indexing_jobs.create( - data_source_uuids=["string"], - 
knowledge_base_uuid="knowledge_base_uuid", + data_source_uuids=["example string"], + knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @@ -185,7 +185,7 @@ def test_path_params_retrieve_data_sources(self, client: GradientAI) -> None: @parametrize def test_method_update_cancel(self, client: GradientAI) -> None: indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -193,8 +193,8 @@ def test_method_update_cancel(self, client: GradientAI) -> None: @parametrize def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None: indexing_job = client.knowledge_bases.indexing_jobs.update_cancel( - path_uuid="uuid", - body_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -202,7 +202,7 @@ def test_method_update_cancel_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update_cancel(self, client: GradientAI) -> None: response = client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -214,7 +214,7 @@ def test_raw_response_update_cancel(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update_cancel(self, client: GradientAI) -> None: with client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -248,8 +248,8 @@ async def 
test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.create( - data_source_uuids=["string"], - knowledge_base_uuid="knowledge_base_uuid", + data_source_uuids=["example string"], + knowledge_base_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(IndexingJobCreateResponse, indexing_job, path=["response"]) @@ -400,7 +400,7 @@ async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradie @parametrize async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -408,8 +408,8 @@ async def test_method_update_cancel(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update_cancel_with_all_params(self, async_client: AsyncGradientAI) -> None: indexing_job = await async_client.knowledge_bases.indexing_jobs.update_cancel( - path_uuid="uuid", - body_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(IndexingJobUpdateCancelResponse, indexing_job, path=["response"]) @@ -417,7 +417,7 @@ async def test_method_update_cancel_with_all_params(self, async_client: AsyncGra @parametrize async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) -> None: response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.update_cancel( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -429,7 +429,7 @@ async def test_raw_response_update_cancel(self, async_client: AsyncGradientAI) - @parametrize async def 
test_streaming_response_update_cancel(self, async_client: AsyncGradientAI) -> None: async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.update_cancel( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/models/providers/test_anthropic.py b/tests/api_resources/models/providers/test_anthropic.py index 79bfcdc3..c61a97ea 100644 --- a/tests/api_resources/models/providers/test_anthropic.py +++ b/tests/api_resources/models/providers/test_anthropic.py @@ -34,8 +34,8 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: anthropic = client.models.providers.anthropic.create( - api_key="api_key", - name="name", + api_key='"sk-ant-12345678901234567890123456789012"', + name='"Production Key"', ) assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) @@ -107,7 +107,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: anthropic = client.models.providers.anthropic.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) @@ -115,10 +115,10 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: anthropic = client.models.providers.anthropic.update( - path_api_key_uuid="api_key_uuid", - api_key="api_key", - body_api_key_uuid="api_key_uuid", - name="name", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key='"sk-ant-12345678901234567890123456789012"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', ) 
assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) @@ -126,7 +126,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.models.providers.anthropic.with_raw_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -138,7 +138,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.models.providers.anthropic.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -239,7 +239,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @parametrize def test_method_list_agents(self, client: GradientAI) -> None: anthropic = client.models.providers.anthropic.list_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"]) @@ -247,7 +247,7 @@ def test_method_list_agents(self, client: GradientAI) -> None: @parametrize def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: anthropic = client.models.providers.anthropic.list_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -257,7 +257,7 @@ def test_method_list_agents_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_list_agents(self, client: GradientAI) -> None: response = client.models.providers.anthropic.with_raw_response.list_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -269,7 +269,7 @@ def 
test_raw_response_list_agents(self, client: GradientAI) -> None: @parametrize def test_streaming_response_list_agents(self, client: GradientAI) -> None: with client.models.providers.anthropic.with_streaming_response.list_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -303,8 +303,8 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: anthropic = await async_client.models.providers.anthropic.create( - api_key="api_key", - name="name", + api_key='"sk-ant-12345678901234567890123456789012"', + name='"Production Key"', ) assert_matches_type(AnthropicCreateResponse, anthropic, path=["response"]) @@ -376,7 +376,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: anthropic = await async_client.models.providers.anthropic.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) @@ -384,10 +384,10 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: anthropic = await async_client.models.providers.anthropic.update( - path_api_key_uuid="api_key_uuid", - api_key="api_key", - body_api_key_uuid="api_key_uuid", - name="name", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key='"sk-ant-12345678901234567890123456789012"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', ) assert_matches_type(AnthropicUpdateResponse, anthropic, path=["response"]) @@ -395,7 +395,7 @@ async def 
test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.models.providers.anthropic.with_raw_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -407,7 +407,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.models.providers.anthropic.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -508,7 +508,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: anthropic = await async_client.models.providers.anthropic.list_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AnthropicListAgentsResponse, anthropic, path=["response"]) @@ -516,7 +516,7 @@ async def test_method_list_agents(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: anthropic = await async_client.models.providers.anthropic.list_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -526,7 +526,7 @@ async def test_method_list_agents_with_all_params(self, async_client: AsyncGradi @parametrize async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> None: response = await async_client.models.providers.anthropic.with_raw_response.list_agents( - uuid="uuid", + 
uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -538,7 +538,7 @@ async def test_raw_response_list_agents(self, async_client: AsyncGradientAI) -> @parametrize async def test_streaming_response_list_agents(self, async_client: AsyncGradientAI) -> None: async with async_client.models.providers.anthropic.with_streaming_response.list_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/models/providers/test_openai.py b/tests/api_resources/models/providers/test_openai.py index 2640601e..7fde1a69 100644 --- a/tests/api_resources/models/providers/test_openai.py +++ b/tests/api_resources/models/providers/test_openai.py @@ -34,8 +34,8 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: openai = client.models.providers.openai.create( - api_key="api_key", - name="name", + api_key='"sk-proj--123456789098765432123456789"', + name='"Production Key"', ) assert_matches_type(OpenAICreateResponse, openai, path=["response"]) @@ -107,7 +107,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: openai = client.models.providers.openai.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @@ -115,10 +115,10 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: openai = client.models.providers.openai.update( - path_api_key_uuid="api_key_uuid", - api_key="api_key", - body_api_key_uuid="api_key_uuid", - name="name", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + 
api_key='"sk-ant-12345678901234567890123456789012"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', ) assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @@ -126,7 +126,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.models.providers.openai.with_raw_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -138,7 +138,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.models.providers.openai.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -239,7 +239,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @parametrize def test_method_retrieve_agents(self, client: GradientAI) -> None: openai = client.models.providers.openai.retrieve_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) @@ -247,7 +247,7 @@ def test_method_retrieve_agents(self, client: GradientAI) -> None: @parametrize def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> None: openai = client.models.providers.openai.retrieve_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -257,7 +257,7 @@ def test_method_retrieve_agents_with_all_params(self, client: GradientAI) -> Non @parametrize def test_raw_response_retrieve_agents(self, client: GradientAI) -> None: response = client.models.providers.openai.with_raw_response.retrieve_agents( - 
uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -269,7 +269,7 @@ def test_raw_response_retrieve_agents(self, client: GradientAI) -> None: @parametrize def test_streaming_response_retrieve_agents(self, client: GradientAI) -> None: with client.models.providers.openai.with_streaming_response.retrieve_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -303,8 +303,8 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: openai = await async_client.models.providers.openai.create( - api_key="api_key", - name="name", + api_key='"sk-proj--123456789098765432123456789"', + name='"Production Key"', ) assert_matches_type(OpenAICreateResponse, openai, path=["response"]) @@ -376,7 +376,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: openai = await async_client.models.providers.openai.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @@ -384,10 +384,10 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: openai = await async_client.models.providers.openai.update( - path_api_key_uuid="api_key_uuid", - api_key="api_key", - body_api_key_uuid="api_key_uuid", - name="name", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', + api_key='"sk-ant-12345678901234567890123456789012"', + body_api_key_uuid='"12345678-1234-1234-1234-123456789012"', + name='"Production Key"', ) 
assert_matches_type(OpenAIUpdateResponse, openai, path=["response"]) @@ -395,7 +395,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.models.providers.openai.with_raw_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -407,7 +407,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.models.providers.openai.with_streaming_response.update( - path_api_key_uuid="api_key_uuid", + path_api_key_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -508,7 +508,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> None: openai = await async_client.models.providers.openai.retrieve_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(OpenAIRetrieveAgentsResponse, openai, path=["response"]) @@ -516,7 +516,7 @@ async def test_method_retrieve_agents(self, async_client: AsyncGradientAI) -> No @parametrize async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncGradientAI) -> None: openai = await async_client.models.providers.openai.retrieve_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', page=0, per_page=0, ) @@ -526,7 +526,7 @@ async def test_method_retrieve_agents_with_all_params(self, async_client: AsyncG @parametrize async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: response = await 
async_client.models.providers.openai.with_raw_response.retrieve_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -538,7 +538,7 @@ async def test_raw_response_retrieve_agents(self, async_client: AsyncGradientAI) @parametrize async def test_streaming_response_retrieve_agents(self, async_client: AsyncGradientAI) -> None: async with async_client.models.providers.openai.with_streaming_response.retrieve_agents( - uuid="uuid", + uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index 2cc0e080..8a6a7d69 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -34,16 +34,16 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: agent = client.agents.create( - anthropic_key_uuid="anthropic_key_uuid", - description="description", - instruction="instruction", - knowledge_base_uuid=["string"], - model_uuid="model_uuid", - name="name", - openai_key_uuid="open_ai_key_uuid", - project_id="project_id", - region="region", - tags=["string"], + anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', + description='"My Agent Description"', + instruction='"You are an agent who thinks deeply about the world"', + knowledge_base_uuid=["example string"], + model_uuid='"12345678-1234-1234-1234-123456789012"', + name='"My Agent"', + openai_key_uuid='"12345678-1234-1234-1234-123456789012"', + project_id='"12345678-1234-1234-1234-123456789012"', + region='"tor1"', + tags=["example string"], ) assert_matches_type(AgentCreateResponse, agent, path=["response"]) @@ -115,7 +115,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> 
None: agent = client.agents.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @@ -123,22 +123,23 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: agent = client.agents.update( - path_uuid="uuid", - anthropic_key_uuid="anthropic_key_uuid", - description="description", - instruction="instruction", - k=0, - max_tokens=0, - model_uuid="model_uuid", - name="name", - openai_key_uuid="open_ai_key_uuid", - project_id="project_id", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', + conversation_logs_enabled=True, + description='"My Agent Description"', + instruction='"You are an agent who thinks deeply about the world"', + k=5, + max_tokens=100, + model_uuid='"12345678-1234-1234-1234-123456789012"', + name='"My New Agent Name"', + openai_key_uuid='"12345678-1234-1234-1234-123456789012"', + project_id='"12345678-1234-1234-1234-123456789012"', provide_citations=True, retrieval_method="RETRIEVAL_METHOD_UNKNOWN", - tags=["string"], - temperature=0, - top_p=0, - body_uuid="uuid", + tags=["example string"], + temperature=0.7, + top_p=0.9, + body_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @@ -146,7 +147,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.agents.with_raw_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -158,7 +159,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.agents.with_streaming_response.update( - 
path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -260,7 +261,7 @@ def test_path_params_delete(self, client: GradientAI) -> None: @parametrize def test_method_update_status(self, client: GradientAI) -> None: agent = client.agents.update_status( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @@ -268,8 +269,8 @@ def test_method_update_status(self, client: GradientAI) -> None: @parametrize def test_method_update_status_with_all_params(self, client: GradientAI) -> None: agent = client.agents.update_status( - path_uuid="uuid", - body_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_uuid='"12345678-1234-1234-1234-123456789012"', visibility="VISIBILITY_UNKNOWN", ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @@ -278,7 +279,7 @@ def test_method_update_status_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update_status(self, client: GradientAI) -> None: response = client.agents.with_raw_response.update_status( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -290,7 +291,7 @@ def test_raw_response_update_status(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update_status(self, client: GradientAI) -> None: with client.agents.with_streaming_response.update_status( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -324,16 +325,16 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: agent 
= await async_client.agents.create( - anthropic_key_uuid="anthropic_key_uuid", - description="description", - instruction="instruction", - knowledge_base_uuid=["string"], - model_uuid="model_uuid", - name="name", - openai_key_uuid="open_ai_key_uuid", - project_id="project_id", - region="region", - tags=["string"], + anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', + description='"My Agent Description"', + instruction='"You are an agent who thinks deeply about the world"', + knowledge_base_uuid=["example string"], + model_uuid='"12345678-1234-1234-1234-123456789012"', + name='"My Agent"', + openai_key_uuid='"12345678-1234-1234-1234-123456789012"', + project_id='"12345678-1234-1234-1234-123456789012"', + region='"tor1"', + tags=["example string"], ) assert_matches_type(AgentCreateResponse, agent, path=["response"]) @@ -405,7 +406,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @@ -413,22 +414,23 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.update( - path_uuid="uuid", - anthropic_key_uuid="anthropic_key_uuid", - description="description", - instruction="instruction", - k=0, - max_tokens=0, - model_uuid="model_uuid", - name="name", - openai_key_uuid="open_ai_key_uuid", - project_id="project_id", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"', + conversation_logs_enabled=True, + description='"My Agent Description"', + instruction='"You are an agent who thinks deeply about the world"', + k=5, + max_tokens=100, + 
model_uuid='"12345678-1234-1234-1234-123456789012"', + name='"My New Agent Name"', + openai_key_uuid='"12345678-1234-1234-1234-123456789012"', + project_id='"12345678-1234-1234-1234-123456789012"', provide_citations=True, retrieval_method="RETRIEVAL_METHOD_UNKNOWN", - tags=["string"], - temperature=0, - top_p=0, - body_uuid="uuid", + tags=["example string"], + temperature=0.7, + top_p=0.9, + body_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(AgentUpdateResponse, agent, path=["response"]) @@ -436,7 +438,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.with_raw_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -448,7 +450,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.with_streaming_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -550,7 +552,7 @@ async def test_path_params_delete(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_update_status(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.update_status( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @@ -558,8 +560,8 @@ async def test_method_update_status(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update_status_with_all_params(self, async_client: AsyncGradientAI) -> None: agent = await async_client.agents.update_status( - 
path_uuid="uuid", - body_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + body_uuid='"12345678-1234-1234-1234-123456789012"', visibility="VISIBILITY_UNKNOWN", ) assert_matches_type(AgentUpdateStatusResponse, agent, path=["response"]) @@ -568,7 +570,7 @@ async def test_method_update_status_with_all_params(self, async_client: AsyncGra @parametrize async def test_raw_response_update_status(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.with_raw_response.update_status( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -580,7 +582,7 @@ async def test_raw_response_update_status(self, async_client: AsyncGradientAI) - @parametrize async def test_streaming_response_update_status(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.with_streaming_response.update_status( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/test_gpu_droplets.py b/tests/api_resources/test_gpu_droplets.py new file mode 100644 index 00000000..22f3d2d0 --- /dev/null +++ b/tests/api_resources/test_gpu_droplets.py @@ -0,0 +1,912 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from gradientai import GradientAI, AsyncGradientAI +from tests.utils import assert_matches_type +from gradientai.types import ( + GPUDropletListResponse, + GPUDropletCreateResponse, + GPUDropletRetrieveResponse, + GPUDropletListKernelsResponse, + GPUDropletListFirewallsResponse, + GPUDropletListNeighborsResponse, + GPUDropletListSnapshotsResponse, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestGPUDroplets: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_1(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.create( + image="ubuntu-20-04-x64", + name="example.com", + size="s-1vcpu-1gb", + ) + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params_overload_1(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.create( + image="ubuntu-20-04-x64", + name="example.com", + size="s-1vcpu-1gb", + backup_policy={ + "hour": 0, + "plan": "daily", + "weekday": "SUN", + }, + backups=True, + ipv6=True, + monitoring=True, + private_networking=True, + region="nyc3", + ssh_keys=[289794, "3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + tags=["env:prod", "web"], + user_data="#cloud-config\nruncmd:\n - touch /test.txt\n", + volumes=["12e97116-7280-11ed-b3d0-0a58ac146812"], + vpc_uuid="760e09ef-dc84-11e8-981e-3cfdfeaae000", + with_droplet_agent=True, + ) + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_create_overload_1(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.create( + image="ubuntu-20-04-x64", + name="example.com", + 
size="s-1vcpu-1gb", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_1(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.create( + image="ubuntu-20-04-x64", + name="example.com", + size="s-1vcpu-1gb", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_create_overload_2(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.create( + image="ubuntu-20-04-x64", + names=["sub-01.example.com", "sub-02.example.com"], + size="s-1vcpu-1gb", + ) + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_create_with_all_params_overload_2(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.create( + image="ubuntu-20-04-x64", + names=["sub-01.example.com", "sub-02.example.com"], + size="s-1vcpu-1gb", + backup_policy={ + "hour": 0, + "plan": "daily", + "weekday": "SUN", + }, + backups=True, + ipv6=True, + monitoring=True, + private_networking=True, + region="nyc3", + ssh_keys=[289794, "3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + tags=["env:prod", "web"], + user_data="#cloud-config\nruncmd:\n - touch /test.txt\n", + volumes=["12e97116-7280-11ed-b3d0-0a58ac146812"], + vpc_uuid="760e09ef-dc84-11e8-981e-3cfdfeaae000", + with_droplet_agent=True, + ) + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def 
test_raw_response_create_overload_2(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.create( + image="ubuntu-20-04-x64", + names=["sub-01.example.com", "sub-02.example.com"], + size="s-1vcpu-1gb", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_create_overload_2(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.create( + image="ubuntu-20-04-x64", + names=["sub-01.example.com", "sub-02.example.com"], + size="s-1vcpu-1gb", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_retrieve(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.retrieve( + 1, + ) + assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.retrieve( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.retrieve( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet 
= response.parse() + assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.list() + assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_with_all_params(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.list( + name="name", + page=1, + per_page=1, + tag_name="tag_name", + type="droplets", + ) + assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.delete( + 1, + ) + assert gpu_droplet is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.delete( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + 
gpu_droplet = response.parse() + assert gpu_droplet is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.delete( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert gpu_droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_delete_by_tag(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.delete_by_tag( + tag_name="tag_name", + ) + assert gpu_droplet is None + + @pytest.mark.skip() + @parametrize + def test_raw_response_delete_by_tag(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.delete_by_tag( + tag_name="tag_name", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert gpu_droplet is None + + @pytest.mark.skip() + @parametrize + def test_streaming_response_delete_by_tag(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.delete_by_tag( + tag_name="tag_name", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert gpu_droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list_firewalls(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.list_firewalls( + droplet_id=3164444, + ) + assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_firewalls_with_all_params(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.list_firewalls( + droplet_id=3164444, + page=1, + 
per_page=1, + ) + assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_firewalls(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.list_firewalls( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_firewalls(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.list_firewalls( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list_kernels(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.list_kernels( + droplet_id=3164444, + ) + assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_kernels_with_all_params(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.list_kernels( + droplet_id=3164444, + page=1, + per_page=1, + ) + assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_kernels(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.list_kernels( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + 
assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_kernels(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.list_kernels( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list_neighbors(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.list_neighbors( + 1, + ) + assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_neighbors(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.list_neighbors( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_neighbors(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.list_neighbors( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_method_list_snapshots(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.list_snapshots( + droplet_id=3164444, + ) + assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, 
path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_method_list_snapshots_with_all_params(self, client: GradientAI) -> None: + gpu_droplet = client.gpu_droplets.list_snapshots( + droplet_id=3164444, + page=1, + per_page=1, + ) + assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_raw_response_list_snapshots(self, client: GradientAI) -> None: + response = client.gpu_droplets.with_raw_response.list_snapshots( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_list_snapshots(self, client: GradientAI) -> None: + with client.gpu_droplets.with_streaming_response.list_snapshots( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = response.parse() + assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + +class TestAsyncGPUDroplets: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.create( + image="ubuntu-20-04-x64", + name="example.com", + size="s-1vcpu-1gb", + ) + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.create( 
+ image="ubuntu-20-04-x64", + name="example.com", + size="s-1vcpu-1gb", + backup_policy={ + "hour": 0, + "plan": "daily", + "weekday": "SUN", + }, + backups=True, + ipv6=True, + monitoring=True, + private_networking=True, + region="nyc3", + ssh_keys=[289794, "3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + tags=["env:prod", "web"], + user_data="#cloud-config\nruncmd:\n - touch /test.txt\n", + volumes=["12e97116-7280-11ed-b3d0-0a58ac146812"], + vpc_uuid="760e09ef-dc84-11e8-981e-3cfdfeaae000", + with_droplet_agent=True, + ) + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.create( + image="ubuntu-20-04-x64", + name="example.com", + size="s-1vcpu-1gb", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.create( + image="ubuntu-20-04-x64", + name="example.com", + size="s-1vcpu-1gb", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.create( + image="ubuntu-20-04-x64", + names=["sub-01.example.com", "sub-02.example.com"], + size="s-1vcpu-1gb", + ) + 
assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.create( + image="ubuntu-20-04-x64", + names=["sub-01.example.com", "sub-02.example.com"], + size="s-1vcpu-1gb", + backup_policy={ + "hour": 0, + "plan": "daily", + "weekday": "SUN", + }, + backups=True, + ipv6=True, + monitoring=True, + private_networking=True, + region="nyc3", + ssh_keys=[289794, "3b:16:e4:bf:8b:00:8b:b8:59:8c:a9:d3:f0:19:fa:45"], + tags=["env:prod", "web"], + user_data="#cloud-config\nruncmd:\n - touch /test.txt\n", + volumes=["12e97116-7280-11ed-b3d0-0a58ac146812"], + vpc_uuid="760e09ef-dc84-11e8-981e-3cfdfeaae000", + with_droplet_agent=True, + ) + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.create( + image="ubuntu-20-04-x64", + names=["sub-01.example.com", "sub-02.example.com"], + size="s-1vcpu-1gb", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletCreateResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.create( + image="ubuntu-20-04-x64", + names=["sub-01.example.com", "sub-02.example.com"], + size="s-1vcpu-1gb", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletCreateResponse, 
gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.retrieve( + 1, + ) + assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.retrieve( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.retrieve( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletRetrieveResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.list() + assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.list( + name="name", + page=1, + per_page=1, + tag_name="tag_name", + type="droplets", + ) + assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list(self, async_client: AsyncGradientAI) -> 
None: + response = await async_client.gpu_droplets.with_raw_response.list() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.list() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.delete( + 1, + ) + assert gpu_droplet is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.delete( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert gpu_droplet is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.delete( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert gpu_droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_delete_by_tag(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await 
async_client.gpu_droplets.delete_by_tag( + tag_name="tag_name", + ) + assert gpu_droplet is None + + @pytest.mark.skip() + @parametrize + async def test_raw_response_delete_by_tag(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.delete_by_tag( + tag_name="tag_name", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert gpu_droplet is None + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_delete_by_tag(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.delete_by_tag( + tag_name="tag_name", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert gpu_droplet is None + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list_firewalls(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.list_firewalls( + droplet_id=3164444, + ) + assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_firewalls_with_all_params(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.list_firewalls( + droplet_id=3164444, + page=1, + per_page=1, + ) + assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_firewalls(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.list_firewalls( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = 
await response.parse() + assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_firewalls(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.list_firewalls( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListFirewallsResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list_kernels(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.list_kernels( + droplet_id=3164444, + ) + assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_kernels_with_all_params(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.list_kernels( + droplet_id=3164444, + page=1, + per_page=1, + ) + assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_kernels(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.list_kernels( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_kernels(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.list_kernels( + droplet_id=3164444, + ) as response: + 
assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListKernelsResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list_neighbors(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.list_neighbors( + 1, + ) + assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_neighbors(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.list_neighbors( + 1, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_neighbors(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.list_neighbors( + 1, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListNeighborsResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_method_list_snapshots(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = await async_client.gpu_droplets.list_snapshots( + droplet_id=3164444, + ) + assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_method_list_snapshots_with_all_params(self, async_client: AsyncGradientAI) -> None: + gpu_droplet = 
await async_client.gpu_droplets.list_snapshots( + droplet_id=3164444, + page=1, + per_page=1, + ) + assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_raw_response_list_snapshots(self, async_client: AsyncGradientAI) -> None: + response = await async_client.gpu_droplets.with_raw_response.list_snapshots( + droplet_id=3164444, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_list_snapshots(self, async_client: AsyncGradientAI) -> None: + async with async_client.gpu_droplets.with_streaming_response.list_snapshots( + droplet_id=3164444, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + gpu_droplet = await response.parse() + assert_matches_type(GPUDropletListSnapshotsResponse, gpu_droplet, path=["response"]) + + assert cast(Any, response.is_closed) is True diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index 508820ce..8a331b52 100644 --- a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -33,42 +33,42 @@ def test_method_create(self, client: GradientAI) -> None: @parametrize def test_method_create_with_all_params(self, client: GradientAI) -> None: knowledge_base = client.knowledge_bases.create( - database_id="database_id", + database_id='"12345678-1234-1234-1234-123456789012"', datasources=[ { "aws_data_source": { - "bucket_name": "bucket_name", - "item_path": "item_path", - "key_id": "key_id", - "region": "region", - "secret_key": "secret_key", + "bucket_name": '"example name"', + "item_path": '"example string"', + "key_id": 
'"123e4567-e89b-12d3-a456-426614174000"', + "region": '"example string"', + "secret_key": '"example string"', }, - "bucket_name": "bucket_name", - "bucket_region": "bucket_region", + "bucket_name": '"example name"', + "bucket_region": '"example string"', "file_upload_data_source": { - "original_file_name": "original_file_name", - "size_in_bytes": "size_in_bytes", - "stored_object_key": "stored_object_key", + "original_file_name": '"example name"', + "size_in_bytes": '"12345"', + "stored_object_key": '"example string"', }, - "item_path": "item_path", + "item_path": '"example string"', "spaces_data_source": { - "bucket_name": "bucket_name", - "item_path": "item_path", - "region": "region", + "bucket_name": '"example name"', + "item_path": '"example string"', + "region": '"example string"', }, "web_crawler_data_source": { - "base_url": "base_url", + "base_url": '"example string"', "crawling_option": "UNKNOWN", "embed_media": True, }, } ], - embedding_model_uuid="embedding_model_uuid", - name="name", - project_id="project_id", - region="region", - tags=["string"], - vpc_uuid="vpc_uuid", + embedding_model_uuid='"12345678-1234-1234-1234-123456789012"', + name='"My Knowledge Base"', + project_id='"12345678-1234-1234-1234-123456789012"', + region='"tor1"', + tags=["example string"], + vpc_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) @@ -140,7 +140,7 @@ def test_path_params_retrieve(self, client: GradientAI) -> None: @parametrize def test_method_update(self, client: GradientAI) -> None: knowledge_base = client.knowledge_bases.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) @@ -148,13 +148,13 @@ def test_method_update(self, client: GradientAI) -> None: @parametrize def test_method_update_with_all_params(self, client: GradientAI) -> None: knowledge_base = 
client.knowledge_bases.update( - path_uuid="uuid", - database_id="database_id", - embedding_model_uuid="embedding_model_uuid", - name="name", - project_id="project_id", - tags=["string"], - body_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + database_id='"12345678-1234-1234-1234-123456789012"', + embedding_model_uuid='"12345678-1234-1234-1234-123456789012"', + name='"My Knowledge Base"', + project_id='"12345678-1234-1234-1234-123456789012"', + tags=["example string"], + body_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) @@ -162,7 +162,7 @@ def test_method_update_with_all_params(self, client: GradientAI) -> None: @parametrize def test_raw_response_update(self, client: GradientAI) -> None: response = client.knowledge_bases.with_raw_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -174,7 +174,7 @@ def test_raw_response_update(self, client: GradientAI) -> None: @parametrize def test_streaming_response_update(self, client: GradientAI) -> None: with client.knowledge_bases.with_streaming_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -287,42 +287,42 @@ async def test_method_create(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_create_with_all_params(self, async_client: AsyncGradientAI) -> None: knowledge_base = await async_client.knowledge_bases.create( - database_id="database_id", + database_id='"12345678-1234-1234-1234-123456789012"', datasources=[ { "aws_data_source": { - "bucket_name": "bucket_name", - "item_path": "item_path", - "key_id": "key_id", - "region": "region", - "secret_key": "secret_key", + "bucket_name": '"example name"', + "item_path": '"example string"', + "key_id": 
'"123e4567-e89b-12d3-a456-426614174000"', + "region": '"example string"', + "secret_key": '"example string"', }, - "bucket_name": "bucket_name", - "bucket_region": "bucket_region", + "bucket_name": '"example name"', + "bucket_region": '"example string"', "file_upload_data_source": { - "original_file_name": "original_file_name", - "size_in_bytes": "size_in_bytes", - "stored_object_key": "stored_object_key", + "original_file_name": '"example name"', + "size_in_bytes": '"12345"', + "stored_object_key": '"example string"', }, - "item_path": "item_path", + "item_path": '"example string"', "spaces_data_source": { - "bucket_name": "bucket_name", - "item_path": "item_path", - "region": "region", + "bucket_name": '"example name"', + "item_path": '"example string"', + "region": '"example string"', }, "web_crawler_data_source": { - "base_url": "base_url", + "base_url": '"example string"', "crawling_option": "UNKNOWN", "embed_media": True, }, } ], - embedding_model_uuid="embedding_model_uuid", - name="name", - project_id="project_id", - region="region", - tags=["string"], - vpc_uuid="vpc_uuid", + embedding_model_uuid='"12345678-1234-1234-1234-123456789012"', + name='"My Knowledge Base"', + project_id='"12345678-1234-1234-1234-123456789012"', + region='"tor1"', + tags=["example string"], + vpc_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(KnowledgeBaseCreateResponse, knowledge_base, path=["response"]) @@ -394,7 +394,7 @@ async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None @parametrize async def test_method_update(self, async_client: AsyncGradientAI) -> None: knowledge_base = await async_client.knowledge_bases.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) @@ -402,13 +402,13 @@ async def test_method_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def 
test_method_update_with_all_params(self, async_client: AsyncGradientAI) -> None: knowledge_base = await async_client.knowledge_bases.update( - path_uuid="uuid", - database_id="database_id", - embedding_model_uuid="embedding_model_uuid", - name="name", - project_id="project_id", - tags=["string"], - body_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', + database_id='"12345678-1234-1234-1234-123456789012"', + embedding_model_uuid='"12345678-1234-1234-1234-123456789012"', + name='"My Knowledge Base"', + project_id='"12345678-1234-1234-1234-123456789012"', + tags=["example string"], + body_uuid='"12345678-1234-1234-1234-123456789012"', ) assert_matches_type(KnowledgeBaseUpdateResponse, knowledge_base, path=["response"]) @@ -416,7 +416,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradientAI @parametrize async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: response = await async_client.knowledge_bases.with_raw_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) assert response.is_closed is True @@ -428,7 +428,7 @@ async def test_raw_response_update(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_streaming_response_update(self, async_client: AsyncGradientAI) -> None: async with async_client.knowledge_bases.with_streaming_response.update( - path_uuid="uuid", + path_uuid='"123e4567-e89b-12d3-a456-426614174000"', ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index 5e119f71..fe837973 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -9,7 +9,7 @@ from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ModelListResponse +from gradientai.types import ModelListResponse, 
ModelRetrieveResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") @@ -19,19 +19,50 @@ class TestModels: @pytest.mark.skip() @parametrize - def test_method_list(self, client: GradientAI) -> None: - model = client.models.list() - assert_matches_type(ModelListResponse, model, path=["response"]) + def test_method_retrieve(self, client: GradientAI) -> None: + model = client.models.retrieve( + "llama3-8b-instruct", + ) + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) @pytest.mark.skip() @parametrize - def test_method_list_with_all_params(self, client: GradientAI) -> None: - model = client.models.list( - page=0, - per_page=0, - public_only=True, - usecases=["MODEL_USECASE_UNKNOWN"], + def test_raw_response_retrieve(self, client: GradientAI) -> None: + response = client.models.with_raw_response.retrieve( + "llama3-8b-instruct", ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = response.parse() + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + def test_streaming_response_retrieve(self, client: GradientAI) -> None: + with client.models.with_streaming_response.retrieve( + "llama3-8b-instruct", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = response.parse() + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + def test_path_params_retrieve(self, client: GradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): + client.models.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + def test_method_list(self, client: GradientAI) -> None: + model = client.models.list() assert_matches_type(ModelListResponse, model, 
path=["response"]) @pytest.mark.skip() @@ -64,19 +95,50 @@ class TestAsyncModels: @pytest.mark.skip() @parametrize - async def test_method_list(self, async_client: AsyncGradientAI) -> None: - model = await async_client.models.list() - assert_matches_type(ModelListResponse, model, path=["response"]) + async def test_method_retrieve(self, async_client: AsyncGradientAI) -> None: + model = await async_client.models.retrieve( + "llama3-8b-instruct", + ) + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) @pytest.mark.skip() @parametrize - async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: - model = await async_client.models.list( - page=0, - per_page=0, - public_only=True, - usecases=["MODEL_USECASE_UNKNOWN"], + async def test_raw_response_retrieve(self, async_client: AsyncGradientAI) -> None: + response = await async_client.models.with_raw_response.retrieve( + "llama3-8b-instruct", ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + model = await response.parse() + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + @pytest.mark.skip() + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncGradientAI) -> None: + async with async_client.models.with_streaming_response.retrieve( + "llama3-8b-instruct", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + model = await response.parse() + assert_matches_type(ModelRetrieveResponse, model, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @pytest.mark.skip() + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncGradientAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): + await async_client.models.with_raw_response.retrieve( + "", + ) + + @pytest.mark.skip() + @parametrize + 
async def test_method_list(self, async_client: AsyncGradientAI) -> None: + model = await async_client.models.list() assert_matches_type(ModelListResponse, model, path=["response"]) @pytest.mark.skip() diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py index 8e25617f..4f232293 100644 --- a/tests/api_resources/test_regions.py +++ b/tests/api_resources/test_regions.py @@ -27,8 +27,8 @@ def test_method_list(self, client: GradientAI) -> None: @parametrize def test_method_list_with_all_params(self, client: GradientAI) -> None: region = client.regions.list( - serves_batch=True, - serves_inference=True, + page=1, + per_page=1, ) assert_matches_type(RegionListResponse, region, path=["response"]) @@ -70,8 +70,8 @@ async def test_method_list(self, async_client: AsyncGradientAI) -> None: @parametrize async def test_method_list_with_all_params(self, async_client: AsyncGradientAI) -> None: region = await async_client.regions.list( - serves_batch=True, - serves_inference=True, + page=1, + per_page=1, ) assert_matches_type(RegionListResponse, region, path=["response"]) From 06d7f19cd42a6bc578b39709fe6efed8741a24bc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Jul 2025 20:16:17 +0000 Subject: [PATCH 04/19] chore(internal): version bump From 74161477f98e3a76b7227b07d942e1f26a4612b3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Jul 2025 20:16:17 +0000 Subject: [PATCH 05/19] chore(internal): version bump From 95d1dd24d290d7d5f23328e4c45c439dca5df748 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Jul 2025 20:16:17 +0000 Subject: [PATCH 06/19] chore(internal): version bump From 44a045a9c0ce0f0769cce66bc7421a9d81cbc645 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> 
Date: Thu, 17 Jul 2025 20:16:17 +0000 Subject: [PATCH 07/19] chore(internal): version bump From 9d2039919e1d9c9e6d153edfb03bccff18b56686 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Jul 2025 20:16:17 +0000 Subject: [PATCH 08/19] chore(internal): version bump From d83fe97aa2f77c84c3c7f4bf40b9fb94c5c28aca Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Jul 2025 20:16:17 +0000 Subject: [PATCH 09/19] chore(internal): version bump From 8ac0f2a6d4862907243ba78b132373289e2c3543 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Jul 2025 20:16:17 +0000 Subject: [PATCH 10/19] chore(internal): version bump From 52e2c23c23d4dc27c176ebf4783c8fbd86a4c07b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Jul 2025 20:16:17 +0000 Subject: [PATCH 11/19] chore(internal): version bump From 20220246634accf95c4a53df200db5ace7107c55 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Jul 2025 20:16:17 +0000 Subject: [PATCH 12/19] chore(internal): version bump From bb3ad60d02fe01b937eaced64682fd66d95a9aec Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Jul 2025 20:16:17 +0000 Subject: [PATCH 13/19] chore(internal): version bump From 881409847161671b798baf2c89f37ae29e195f29 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Jul 2025 20:16:17 +0000 Subject: [PATCH 14/19] chore(internal): version bump From 9c546a1f97241bb448430e1e43f4e20589e243c1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Jul 2025 20:16:17 +0000 
Subject: [PATCH 15/19] chore(internal): version bump From 1a661264f68580dff74c3f7d4891ab2661fde190 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 17 Jul 2025 22:28:28 +0000 Subject: [PATCH 16/19] chore(internal): version bump From d940e66107e00f351853c0bc667ca6ed3cf98605 Mon Sep 17 00:00:00 2001 From: meorphis Date: Thu, 17 Jul 2025 19:21:43 -0400 Subject: [PATCH 17/19] chore: format --- README.md | 5 +- src/gradientai/_client.py | 64 +++++-------------- .../resources/agents/chat/completions.py | 32 +++------- .../agents/chat/test_completions.py | 36 +++-------- 4 files changed, 36 insertions(+), 101 deletions(-) diff --git a/README.md b/README.md index 2c739c6d..67ca8b6c 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,9 @@ api_client = GradientAI( api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted ) inference_client = GradientAI( - inference_key=os.environ.get("GRADIENTAI_INFERENCE_KEY"), # This is the default and can be omitted + inference_key=os.environ.get( + "GRADIENTAI_INFERENCE_KEY" + ), # This is the default and can be omitted ) agent_client = GradientAI( agent_key=os.environ.get("GRADIENTAI_AGENT_KEY"), # This is the default and can be omitted @@ -51,7 +53,6 @@ completion = inference_client.chat.completions.create( ) print(completion.choices[0].message) - ``` While you can provide an `api_key`, `inference_key` keyword argument, diff --git a/src/gradientai/_client.py b/src/gradientai/_client.py index 43bcc2c4..53d2a4e4 100644 --- a/src/gradientai/_client.py +++ b/src/gradientai/_client.py @@ -294,9 +294,7 @@ def default_headers(self) -> dict[str, str | Omit]: @override def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: - if (self.api_key or self.agent_key or self.inference_key) and headers.get( - "Authorization" - ): + if (self.api_key or self.agent_key or self.inference_key) and headers.get("Authorization"): return if 
isinstance(custom_headers.get("Authorization"), Omit): return @@ -326,14 +324,10 @@ def copy( Create a new client instance re-using the same options given to the current client with optional overriding. """ if default_headers is not None and set_default_headers is not None: - raise ValueError( - "The `default_headers` and `set_default_headers` arguments are mutually exclusive" - ) + raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive") if default_query is not None and set_default_query is not None: - raise ValueError( - "The `default_query` and `set_default_query` arguments are mutually exclusive" - ) + raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive") headers = self._custom_headers if default_headers is not None: @@ -380,14 +374,10 @@ def _make_status_error( return _exceptions.BadRequestError(err_msg, response=response, body=body) if response.status_code == 401: - return _exceptions.AuthenticationError( - err_msg, response=response, body=body - ) + return _exceptions.AuthenticationError(err_msg, response=response, body=body) if response.status_code == 403: - return _exceptions.PermissionDeniedError( - err_msg, response=response, body=body - ) + return _exceptions.PermissionDeniedError(err_msg, response=response, body=body) if response.status_code == 404: return _exceptions.NotFoundError(err_msg, response=response, body=body) @@ -396,17 +386,13 @@ def _make_status_error( return _exceptions.ConflictError(err_msg, response=response, body=body) if response.status_code == 422: - return _exceptions.UnprocessableEntityError( - err_msg, response=response, body=body - ) + return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body) if response.status_code == 429: return _exceptions.RateLimitError(err_msg, response=response, body=body) if response.status_code >= 500: - return _exceptions.InternalServerError( - err_msg, response=response, body=body - ) + return 
_exceptions.InternalServerError(err_msg, response=response, body=body) return APIStatusError(err_msg, response=response, body=body) @@ -618,9 +604,7 @@ def default_headers(self) -> dict[str, str | Omit]: @override def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None: - if (self.api_key or self.agent_key or self.inference_key) and headers.get( - "Authorization" - ): + if (self.api_key or self.agent_key or self.inference_key) and headers.get("Authorization"): return if isinstance(custom_headers.get("Authorization"), Omit): return @@ -650,14 +634,10 @@ def copy( Create a new client instance re-using the same options given to the current client with optional overriding. """ if default_headers is not None and set_default_headers is not None: - raise ValueError( - "The `default_headers` and `set_default_headers` arguments are mutually exclusive" - ) + raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive") if default_query is not None and set_default_query is not None: - raise ValueError( - "The `default_query` and `set_default_query` arguments are mutually exclusive" - ) + raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive") headers = self._custom_headers if default_headers is not None: @@ -704,14 +684,10 @@ def _make_status_error( return _exceptions.BadRequestError(err_msg, response=response, body=body) if response.status_code == 401: - return _exceptions.AuthenticationError( - err_msg, response=response, body=body - ) + return _exceptions.AuthenticationError(err_msg, response=response, body=body) if response.status_code == 403: - return _exceptions.PermissionDeniedError( - err_msg, response=response, body=body - ) + return _exceptions.PermissionDeniedError(err_msg, response=response, body=body) if response.status_code == 404: return _exceptions.NotFoundError(err_msg, response=response, body=body) @@ -720,17 +696,13 @@ def _make_status_error( return 
_exceptions.ConflictError(err_msg, response=response, body=body) if response.status_code == 422: - return _exceptions.UnprocessableEntityError( - err_msg, response=response, body=body - ) + return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body) if response.status_code == 429: return _exceptions.RateLimitError(err_msg, response=response, body=body) if response.status_code >= 500: - return _exceptions.InternalServerError( - err_msg, response=response, body=body - ) + return _exceptions.InternalServerError(err_msg, response=response, body=body) return APIStatusError(err_msg, response=response, body=body) @@ -1069,9 +1041,7 @@ def knowledge_bases( AsyncKnowledgeBasesResourceWithStreamingResponse, ) - return AsyncKnowledgeBasesResourceWithStreamingResponse( - self._client.knowledge_bases - ) + return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases) @cached_property def inference(self) -> inference.AsyncInferenceResourceWithStreamingResponse: @@ -1121,9 +1091,7 @@ def load_balancers( AsyncLoadBalancersResourceWithStreamingResponse, ) - return AsyncLoadBalancersResourceWithStreamingResponse( - self._client.load_balancers - ) + return AsyncLoadBalancersResourceWithStreamingResponse(self._client.load_balancers) @cached_property def sizes(self) -> sizes.AsyncSizesResourceWithStreamingResponse: diff --git a/src/gradientai/resources/agents/chat/completions.py b/src/gradientai/resources/agents/chat/completions.py index 96a6d843..23b17011 100644 --- a/src/gradientai/resources/agents/chat/completions.py +++ b/src/gradientai/resources/agents/chat/completions.py @@ -62,9 +62,7 @@ def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - stream_options: ( - Optional[completion_create_params.StreamOptions] | NotGiven - ) = NOT_GIVEN, + stream_options: 
(Optional[completion_create_params.StreamOptions] | NotGiven) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, @@ -192,9 +190,7 @@ def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - stream_options: ( - Optional[completion_create_params.StreamOptions] | NotGiven - ) = NOT_GIVEN, + stream_options: (Optional[completion_create_params.StreamOptions] | NotGiven) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, @@ -322,9 +318,7 @@ def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - stream_options: ( - Optional[completion_create_params.StreamOptions] | NotGiven - ) = NOT_GIVEN, + stream_options: (Optional[completion_create_params.StreamOptions] | NotGiven) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, @@ -455,9 +449,7 @@ def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - stream_options: ( - Optional[completion_create_params.StreamOptions] | NotGiven - ) = NOT_GIVEN, + stream_options: (Optional[completion_create_params.StreamOptions] | NotGiven) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, 
tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, @@ -561,9 +553,7 @@ async def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - stream_options: ( - Optional[completion_create_params.StreamOptions] | NotGiven - ) = NOT_GIVEN, + stream_options: (Optional[completion_create_params.StreamOptions] | NotGiven) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, @@ -691,9 +681,7 @@ async def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - stream_options: ( - Optional[completion_create_params.StreamOptions] | NotGiven - ) = NOT_GIVEN, + stream_options: (Optional[completion_create_params.StreamOptions] | NotGiven) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, @@ -821,9 +809,7 @@ async def create( n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, - stream_options: ( - Optional[completion_create_params.StreamOptions] | NotGiven - ) = NOT_GIVEN, + stream_options: (Optional[completion_create_params.StreamOptions] | NotGiven) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, @@ -951,9 +937,7 @@ async def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], 
List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, - stream_options: ( - Optional[completion_create_params.StreamOptions] | NotGiven - ) = NOT_GIVEN, + stream_options: (Optional[completion_create_params.StreamOptions] | NotGiven) = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: completion_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[completion_create_params.Tool] | NotGiven = NOT_GIVEN, diff --git a/tests/api_resources/agents/chat/test_completions.py b/tests/api_resources/agents/chat/test_completions.py index 6533a423..06342867 100644 --- a/tests/api_resources/agents/chat/test_completions.py +++ b/tests/api_resources/agents/chat/test_completions.py @@ -15,9 +15,7 @@ class TestCompletions: - parametrize = pytest.mark.parametrize( - "client", [False, True], indirect=True, ids=["loose", "strict"] - ) + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @pytest.mark.skip() @parametrize @@ -216,9 +214,7 @@ class TestAsyncCompletions: @pytest.mark.skip() @parametrize - async def test_method_create_overload_1( - self, async_client: AsyncGradientAI - ) -> None: + async def test_method_create_overload_1(self, async_client: AsyncGradientAI) -> None: completion = await async_client.agents.chat.completions.create( messages=[ { @@ -232,9 +228,7 @@ async def test_method_create_overload_1( @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params_overload_1( - self, async_client: AsyncGradientAI - ) -> None: + async def test_method_create_with_all_params_overload_1(self, async_client: AsyncGradientAI) -> None: completion = await async_client.agents.chat.completions.create( messages=[ { @@ -274,9 +268,7 @@ async def test_method_create_with_all_params_overload_1( @pytest.mark.skip() @parametrize - async def test_raw_response_create_overload_1( - self, async_client: AsyncGradientAI - ) -> None: + 
async def test_raw_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.chat.completions.with_raw_response.create( messages=[ { @@ -294,9 +286,7 @@ async def test_raw_response_create_overload_1( @pytest.mark.skip() @parametrize - async def test_streaming_response_create_overload_1( - self, async_client: AsyncGradientAI - ) -> None: + async def test_streaming_response_create_overload_1(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.chat.completions.with_streaming_response.create( messages=[ { @@ -316,9 +306,7 @@ async def test_streaming_response_create_overload_1( @pytest.mark.skip() @parametrize - async def test_method_create_overload_2( - self, async_client: AsyncGradientAI - ) -> None: + async def test_method_create_overload_2(self, async_client: AsyncGradientAI) -> None: completion_stream = await async_client.agents.chat.completions.create( messages=[ { @@ -333,9 +321,7 @@ async def test_method_create_overload_2( @pytest.mark.skip() @parametrize - async def test_method_create_with_all_params_overload_2( - self, async_client: AsyncGradientAI - ) -> None: + async def test_method_create_with_all_params_overload_2(self, async_client: AsyncGradientAI) -> None: completion_stream = await async_client.agents.chat.completions.create( messages=[ { @@ -375,9 +361,7 @@ async def test_method_create_with_all_params_overload_2( @pytest.mark.skip() @parametrize - async def test_raw_response_create_overload_2( - self, async_client: AsyncGradientAI - ) -> None: + async def test_raw_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: response = await async_client.agents.chat.completions.with_raw_response.create( messages=[ { @@ -395,9 +379,7 @@ async def test_raw_response_create_overload_2( @pytest.mark.skip() @parametrize - async def test_streaming_response_create_overload_2( - self, async_client: AsyncGradientAI - ) -> None: + async def 
test_streaming_response_create_overload_2(self, async_client: AsyncGradientAI) -> None: async with async_client.agents.chat.completions.with_streaming_response.create( messages=[ { From b207e9a69ddf821522f5d9e9f10502850220585f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 18 Jul 2025 15:29:04 +0000 Subject: [PATCH 18/19] feat(api): add gpu droplets --- .stats.yml | 2 +- CONTRIBUTING.md | 2 +- README.md | 49 +- api.md | 418 +++++++++--------- mypy.ini | 2 +- pyproject.toml | 8 +- release-please-config.json | 2 +- scripts/lint | 2 +- src/{gradientai => do_gradientai}/__init__.py | 4 +- .../_base_client.py | 2 +- src/{gradientai => do_gradientai}/_client.py | 228 ++++++---- src/{gradientai => do_gradientai}/_compat.py | 0 .../_constants.py | 0 .../_exceptions.py | 0 src/{gradientai => do_gradientai}/_files.py | 0 src/{gradientai => do_gradientai}/_models.py | 0 src/{gradientai => do_gradientai}/_qs.py | 0 .../_resource.py | 0 .../_response.py | 8 +- .../_streaming.py | 0 src/{gradientai => do_gradientai}/_types.py | 2 +- .../_utils/__init__.py | 0 .../_utils/_logs.py | 4 +- .../_utils/_proxy.py | 0 .../_utils/_reflection.py | 0 .../_utils/_resources_proxy.py | 8 +- .../_utils/_streams.py | 0 .../_utils/_sync.py | 0 .../_utils/_transform.py | 0 .../_utils/_typing.py | 0 .../_utils/_utils.py | 0 src/{gradientai => do_gradientai}/_version.py | 0 src/{gradientai => do_gradientai}/py.typed | 0 .../resources/__init__.py | 0 .../resources/agents/__init__.py | 0 .../resources/agents/agents.py | 0 .../resources/agents/api_keys.py | 0 .../resources/agents/chat/__init__.py | 0 .../resources/agents/chat/chat.py | 0 .../resources/agents/chat/completions.py | 0 .../resources/agents/evaluation_datasets.py | 0 .../agents/evaluation_metrics/__init__.py | 0 .../evaluation_metrics/evaluation_metrics.py | 0 .../agents/evaluation_metrics/models.py | 0 .../evaluation_metrics/workspaces/__init__.py | 0 
.../evaluation_metrics/workspaces/agents.py | 0 .../workspaces/workspaces.py | 0 .../resources/agents/evaluation_runs.py | 0 .../resources/agents/evaluation_test_cases.py | 0 .../resources/agents/functions.py | 0 .../resources/agents/knowledge_bases.py | 0 .../resources/agents/routes.py | 0 .../resources/agents/versions.py | 0 .../resources/chat/__init__.py | 0 .../resources/chat/chat.py | 0 .../resources/chat/completions.py | 0 .../resources/gpu_droplets/__init__.py | 0 .../gpu_droplets/account/__init__.py | 0 .../resources/gpu_droplets/account/account.py | 0 .../resources/gpu_droplets/account/keys.py | 0 .../resources/gpu_droplets/actions.py | 0 .../resources/gpu_droplets/autoscale.py | 0 .../resources/gpu_droplets/backups.py | 0 .../destroy_with_associated_resources.py | 0 .../gpu_droplets/firewalls/__init__.py | 0 .../gpu_droplets/firewalls/droplets.py | 0 .../gpu_droplets/firewalls/firewalls.py | 0 .../resources/gpu_droplets/firewalls/rules.py | 0 .../resources/gpu_droplets/firewalls/tags.py | 0 .../gpu_droplets/floating_ips/__init__.py | 0 .../gpu_droplets/floating_ips/actions.py | 0 .../gpu_droplets/floating_ips/floating_ips.py | 0 .../resources/gpu_droplets/gpu_droplets.py | 0 .../resources/gpu_droplets/images/__init__.py | 0 .../resources/gpu_droplets/images/actions.py | 0 .../resources/gpu_droplets/images/images.py | 0 .../gpu_droplets/load_balancers/__init__.py | 0 .../gpu_droplets/load_balancers/droplets.py | 0 .../load_balancers/forwarding_rules.py | 0 .../load_balancers/load_balancers.py | 0 .../resources/gpu_droplets/sizes.py | 0 .../resources/gpu_droplets/snapshots.py | 0 .../gpu_droplets/volumes/__init__.py | 0 .../resources/gpu_droplets/volumes/actions.py | 0 .../gpu_droplets/volumes/snapshots.py | 0 .../resources/gpu_droplets/volumes/volumes.py | 0 .../resources/inference/__init__.py | 0 .../resources/inference/api_keys.py | 0 .../resources/inference/inference.py | 0 .../resources/knowledge_bases/__init__.py | 0 
.../resources/knowledge_bases/data_sources.py | 0 .../knowledge_bases/indexing_jobs.py | 0 .../knowledge_bases/knowledge_bases.py | 0 .../resources/models/__init__.py | 0 .../resources/models/models.py | 0 .../resources/models/providers/__init__.py | 0 .../resources/models/providers/anthropic.py | 0 .../resources/models/providers/openai.py | 0 .../resources/models/providers/providers.py | 0 .../resources/regions.py | 0 .../types/__init__.py | 0 .../types/agent_create_params.py | 0 .../types/agent_create_response.py | 0 .../types/agent_delete_response.py | 0 .../types/agent_list_params.py | 0 .../types/agent_list_response.py | 0 .../types/agent_retrieve_response.py | 0 .../types/agent_update_params.py | 0 .../types/agent_update_response.py | 0 .../types/agent_update_status_params.py | 0 .../types/agent_update_status_response.py | 0 .../types/agents/__init__.py | 0 .../types/agents/api_evaluation_metric.py | 0 .../agents/api_evaluation_metric_result.py | 0 .../types/agents/api_evaluation_prompt.py | 0 .../types/agents/api_evaluation_run.py | 0 .../types/agents/api_evaluation_test_case.py | 0 .../types/agents/api_key_create_params.py | 0 .../types/agents/api_key_create_response.py | 0 .../types/agents/api_key_delete_response.py | 0 .../types/agents/api_key_list_params.py | 0 .../types/agents/api_key_list_response.py | 0 .../agents/api_key_regenerate_response.py | 0 .../types/agents/api_key_update_params.py | 0 .../types/agents/api_key_update_response.py | 0 .../agents/api_link_knowledge_base_output.py | 0 .../types/agents/api_star_metric.py | 0 .../types/agents/api_star_metric_param.py | 0 .../types/agents/chat/__init__.py | 0 .../agents/chat/completion_create_params.py | 0 .../agents/chat/completion_create_response.py | 0 ...reate_file_upload_presigned_urls_params.py | 0 ...ate_file_upload_presigned_urls_response.py | 0 .../evaluation_dataset_create_params.py | 0 .../evaluation_dataset_create_response.py | 0 .../evaluation_metric_list_regions_params.py | 0 
...evaluation_metric_list_regions_response.py | 0 .../agents/evaluation_metric_list_response.py | 0 .../agents/evaluation_metrics/__init__.py | 0 .../evaluation_metrics/model_list_params.py | 0 .../evaluation_metrics/model_list_response.py | 0 .../workspace_create_params.py | 0 .../workspace_create_response.py | 0 .../workspace_delete_response.py | 0 ...ace_list_evaluation_test_cases_response.py | 0 .../workspace_list_response.py | 0 .../workspace_retrieve_response.py | 0 .../workspace_update_params.py | 0 .../workspace_update_response.py | 0 .../evaluation_metrics/workspaces/__init__.py | 0 .../workspaces/agent_list_params.py | 0 .../workspaces/agent_list_response.py | 0 .../workspaces/agent_move_params.py | 0 .../workspaces/agent_move_response.py | 0 .../agents/evaluation_run_create_params.py | 0 .../agents/evaluation_run_create_response.py | 0 .../evaluation_run_list_results_params.py | 0 .../evaluation_run_list_results_response.py | 0 .../evaluation_run_retrieve_response.py | 0 ...valuation_run_retrieve_results_response.py | 0 .../evaluation_test_case_create_params.py | 0 .../evaluation_test_case_create_response.py | 0 ...n_test_case_list_evaluation_runs_params.py | 0 ...test_case_list_evaluation_runs_response.py | 0 .../evaluation_test_case_list_response.py | 0 .../evaluation_test_case_retrieve_params.py | 0 .../evaluation_test_case_retrieve_response.py | 0 .../evaluation_test_case_update_params.py | 0 .../evaluation_test_case_update_response.py | 0 .../types/agents/function_create_params.py | 0 .../types/agents/function_create_response.py | 0 .../types/agents/function_delete_response.py | 0 .../types/agents/function_update_params.py | 0 .../types/agents/function_update_response.py | 0 .../agents/knowledge_base_detach_response.py | 0 .../types/agents/route_add_params.py | 0 .../types/agents/route_add_response.py | 0 .../types/agents/route_delete_response.py | 0 .../types/agents/route_update_params.py | 0 .../types/agents/route_update_response.py | 0 
.../types/agents/route_view_response.py | 0 .../types/agents/version_list_params.py | 0 .../types/agents/version_list_response.py | 0 .../types/agents/version_update_params.py | 0 .../types/agents/version_update_response.py | 0 .../types/api_agent.py | 0 .../types/api_agent_api_key_info.py | 0 .../types/api_agent_model.py | 0 .../types/api_agreement.py | 0 .../types/api_anthropic_api_key_info.py | 0 .../types/api_deployment_visibility.py | 0 .../types/api_knowledge_base.py | 0 .../types/api_model.py | 0 .../types/api_model_version.py | 0 .../types/api_openai_api_key_info.py | 0 .../types/api_retrieval_method.py | 0 .../types/api_workspace.py | 0 .../types/chat/__init__.py | 0 .../types/chat/completion_create_params.py | 0 .../types/chat/completion_create_response.py | 0 .../types/droplet_backup_policy.py | 0 .../types/droplet_backup_policy_param.py | 0 .../types/gpu_droplet_create_params.py | 0 .../types/gpu_droplet_create_response.py | 0 .../types/gpu_droplet_delete_by_tag_params.py | 0 .../gpu_droplet_list_firewalls_params.py | 0 .../gpu_droplet_list_firewalls_response.py | 0 .../types/gpu_droplet_list_kernels_params.py | 0 .../gpu_droplet_list_kernels_response.py | 0 .../gpu_droplet_list_neighbors_response.py | 0 .../types/gpu_droplet_list_params.py | 0 .../types/gpu_droplet_list_response.py | 0 .../gpu_droplet_list_snapshots_params.py | 0 .../gpu_droplet_list_snapshots_response.py | 0 .../types/gpu_droplet_retrieve_response.py | 0 .../types/gpu_droplets/__init__.py | 0 .../types/gpu_droplets/account/__init__.py | 0 .../gpu_droplets/account/key_create_params.py | 0 .../account/key_create_response.py | 0 .../gpu_droplets/account/key_list_params.py | 0 .../gpu_droplets/account/key_list_response.py | 0 .../account/key_retrieve_response.py | 0 .../gpu_droplets/account/key_update_params.py | 0 .../account/key_update_response.py | 0 .../action_bulk_initiate_params.py | 0 .../action_bulk_initiate_response.py | 0 .../gpu_droplets/action_initiate_params.py | 0 
.../gpu_droplets/action_initiate_response.py | 0 .../types/gpu_droplets/action_list_params.py | 0 .../gpu_droplets/action_list_response.py | 0 .../gpu_droplets/action_retrieve_response.py | 0 .../types/gpu_droplets/associated_resource.py | 0 .../gpu_droplets/autoscale_create_params.py | 0 .../gpu_droplets/autoscale_create_response.py | 0 .../autoscale_list_history_params.py | 0 .../autoscale_list_history_response.py | 0 .../autoscale_list_members_params.py | 0 .../autoscale_list_members_response.py | 0 .../gpu_droplets/autoscale_list_params.py | 0 .../gpu_droplets/autoscale_list_response.py | 0 .../types/gpu_droplets/autoscale_pool.py | 0 .../autoscale_pool_droplet_template.py | 0 .../autoscale_pool_droplet_template_param.py | 0 .../autoscale_pool_dynamic_config.py | 0 .../autoscale_pool_dynamic_config_param.py | 0 .../autoscale_pool_static_config.py | 0 .../autoscale_pool_static_config_param.py | 0 .../autoscale_retrieve_response.py | 0 .../gpu_droplets/autoscale_update_params.py | 0 .../gpu_droplets/autoscale_update_response.py | 0 .../types/gpu_droplets/backup_list_params.py | 0 .../backup_list_policies_params.py | 0 .../backup_list_policies_response.py | 0 .../gpu_droplets/backup_list_response.py | 0 ...backup_list_supported_policies_response.py | 0 .../backup_retrieve_policy_response.py | 0 .../types/gpu_droplets/current_utilization.py | 0 ...sociated_resource_check_status_response.py | 0 ...ciated_resource_delete_selective_params.py | 0 ..._with_associated_resource_list_response.py | 0 .../destroyed_associated_resource.py | 0 .../types/gpu_droplets/domains.py | 0 .../types/gpu_droplets/domains_param.py | 0 .../types/gpu_droplets/firewall.py | 0 .../gpu_droplets/firewall_create_params.py | 0 .../gpu_droplets/firewall_create_response.py | 0 .../gpu_droplets/firewall_list_params.py | 0 .../gpu_droplets/firewall_list_response.py | 0 .../types/gpu_droplets/firewall_param.py | 0 .../firewall_retrieve_response.py | 0 .../gpu_droplets/firewall_update_params.py | 0 
.../gpu_droplets/firewall_update_response.py | 0 .../types/gpu_droplets/firewalls/__init__.py | 0 .../firewalls/droplet_add_params.py | 0 .../firewalls/droplet_remove_params.py | 0 .../gpu_droplets/firewalls/rule_add_params.py | 0 .../firewalls/rule_remove_params.py | 0 .../gpu_droplets/firewalls/tag_add_params.py | 0 .../firewalls/tag_remove_params.py | 0 .../types/gpu_droplets/floating_ip.py | 0 .../gpu_droplets/floating_ip_create_params.py | 0 .../floating_ip_create_response.py | 0 .../gpu_droplets/floating_ip_list_params.py | 0 .../gpu_droplets/floating_ip_list_response.py | 0 .../floating_ip_retrieve_response.py | 0 .../gpu_droplets/floating_ips/__init__.py | 0 .../floating_ips/action_create_params.py | 0 .../floating_ips/action_create_response.py | 0 .../floating_ips/action_list_response.py | 0 .../floating_ips/action_retrieve_response.py | 0 .../types/gpu_droplets/forwarding_rule.py | 0 .../gpu_droplets/forwarding_rule_param.py | 0 .../types/gpu_droplets/glb_settings.py | 0 .../types/gpu_droplets/glb_settings_param.py | 0 .../types/gpu_droplets/health_check.py | 0 .../types/gpu_droplets/health_check_param.py | 0 .../types/gpu_droplets/image_create_params.py | 0 .../gpu_droplets/image_create_response.py | 0 .../types/gpu_droplets/image_list_params.py | 0 .../types/gpu_droplets/image_list_response.py | 0 .../gpu_droplets/image_retrieve_response.py | 0 .../types/gpu_droplets/image_update_params.py | 0 .../gpu_droplets/image_update_response.py | 0 .../types/gpu_droplets/images/__init__.py | 0 .../images/action_create_params.py | 0 .../images/action_list_response.py | 0 .../types/gpu_droplets/lb_firewall.py | 0 .../types/gpu_droplets/lb_firewall_param.py | 0 .../types/gpu_droplets/load_balancer.py | 0 .../load_balancer_create_params.py | 0 .../load_balancer_create_response.py | 0 .../gpu_droplets/load_balancer_list_params.py | 0 .../load_balancer_list_response.py | 0 .../load_balancer_retrieve_response.py | 0 .../load_balancer_update_params.py | 0 
.../load_balancer_update_response.py | 0 .../gpu_droplets/load_balancers/__init__.py | 0 .../load_balancers/droplet_add_params.py | 0 .../load_balancers/droplet_remove_params.py | 0 .../forwarding_rule_add_params.py | 0 .../forwarding_rule_remove_params.py | 0 .../types/gpu_droplets/size_list_params.py | 0 .../types/gpu_droplets/size_list_response.py | 0 .../gpu_droplets/snapshot_list_params.py | 0 .../gpu_droplets/snapshot_list_response.py | 0 .../snapshot_retrieve_response.py | 0 .../types/gpu_droplets/sticky_sessions.py | 0 .../gpu_droplets/sticky_sessions_param.py | 0 .../gpu_droplets/volume_create_params.py | 0 .../gpu_droplets/volume_create_response.py | 0 .../volume_delete_by_name_params.py | 0 .../types/gpu_droplets/volume_list_params.py | 0 .../gpu_droplets/volume_list_response.py | 0 .../gpu_droplets/volume_retrieve_response.py | 0 .../types/gpu_droplets/volumes/__init__.py | 0 .../volumes/action_initiate_by_id_params.py | 0 .../volumes/action_initiate_by_id_response.py | 0 .../volumes/action_initiate_by_name_params.py | 0 .../action_initiate_by_name_response.py | 0 .../volumes/action_list_params.py | 0 .../volumes/action_list_response.py | 0 .../volumes/action_retrieve_params.py | 0 .../volumes/action_retrieve_response.py | 0 .../volumes/snapshot_create_params.py | 0 .../volumes/snapshot_create_response.py | 0 .../volumes/snapshot_list_params.py | 0 .../volumes/snapshot_list_response.py | 0 .../volumes/snapshot_retrieve_response.py | 0 .../gpu_droplets/volumes/volume_action.py | 0 .../types/inference/__init__.py | 0 .../types/inference/api_key_create_params.py | 0 .../inference/api_key_create_response.py | 0 .../inference/api_key_delete_response.py | 0 .../types/inference/api_key_list_params.py | 0 .../types/inference/api_key_list_response.py | 0 .../types/inference/api_key_update_params.py | 0 .../api_key_update_regenerate_response.py | 0 .../inference/api_key_update_response.py | 0 .../types/inference/api_model_api_key_info.py | 0 
.../types/knowledge_base_create_params.py | 0 .../types/knowledge_base_create_response.py | 0 .../types/knowledge_base_delete_response.py | 0 .../types/knowledge_base_list_params.py | 0 .../types/knowledge_base_list_response.py | 0 .../types/knowledge_base_retrieve_response.py | 0 .../types/knowledge_base_update_params.py | 0 .../types/knowledge_base_update_response.py | 0 .../types/knowledge_bases/__init__.py | 0 .../api_file_upload_data_source.py | 0 .../api_file_upload_data_source_param.py | 0 .../api_indexed_data_source.py | 0 .../types/knowledge_bases/api_indexing_job.py | 0 .../api_knowledge_base_data_source.py | 0 .../knowledge_bases/api_spaces_data_source.py | 0 .../api_spaces_data_source_param.py | 0 .../api_web_crawler_data_source.py | 0 .../api_web_crawler_data_source_param.py | 0 .../knowledge_bases/aws_data_source_param.py | 0 .../data_source_create_params.py | 0 .../data_source_create_response.py | 0 .../data_source_delete_response.py | 0 .../data_source_list_params.py | 0 .../data_source_list_response.py | 0 .../indexing_job_create_params.py | 0 .../indexing_job_create_response.py | 0 .../indexing_job_list_params.py | 0 .../indexing_job_list_response.py | 0 ...xing_job_retrieve_data_sources_response.py | 0 .../indexing_job_retrieve_response.py | 0 .../indexing_job_update_cancel_params.py | 0 .../indexing_job_update_cancel_response.py | 0 .../types/model_list_response.py | 0 .../types/model_retrieve_response.py | 0 .../types/models/__init__.py | 0 .../types/models/providers/__init__.py | 0 .../providers/anthropic_create_params.py | 0 .../providers/anthropic_create_response.py | 0 .../providers/anthropic_delete_response.py | 0 .../providers/anthropic_list_agents_params.py | 0 .../anthropic_list_agents_response.py | 0 .../models/providers/anthropic_list_params.py | 0 .../providers/anthropic_list_response.py | 0 .../providers/anthropic_retrieve_response.py | 0 .../providers/anthropic_update_params.py | 0 .../providers/anthropic_update_response.py | 0 
.../models/providers/openai_create_params.py | 0 .../providers/openai_create_response.py | 0 .../providers/openai_delete_response.py | 0 .../models/providers/openai_list_params.py | 0 .../models/providers/openai_list_response.py | 0 .../openai_retrieve_agents_params.py | 0 .../openai_retrieve_agents_response.py | 0 .../providers/openai_retrieve_response.py | 0 .../models/providers/openai_update_params.py | 0 .../providers/openai_update_response.py | 0 .../types/region_list_params.py | 0 .../types/region_list_response.py | 0 .../types/shared/__init__.py | 0 .../types/shared/action.py | 0 .../types/shared/action_link.py | 0 .../types/shared/api_links.py | 0 .../types/shared/api_meta.py | 0 .../types/shared/backward_links.py | 0 .../types/shared/chat_completion_chunk.py | 0 .../shared/chat_completion_token_logprob.py | 0 .../types/shared/completion_usage.py | 0 .../types/shared/disk_info.py | 0 .../types/shared/droplet.py | 0 .../shared/droplet_next_backup_window.py | 0 .../types/shared/firewall_rule_target.py | 0 .../types/shared/forward_links.py | 0 .../types/shared/garbage_collection.py | 0 .../types/shared/gpu_info.py | 0 .../types/shared/image.py | 0 .../types/shared/kernel.py | 0 .../types/shared/meta_properties.py | 0 .../types/shared/network_v4.py | 0 .../types/shared/network_v6.py | 0 .../types/shared/page_links.py | 0 .../types/shared/region.py | 0 .../types/shared/size.py | 0 .../types/shared/snapshots.py | 0 .../types/shared/subscription.py | 0 .../types/shared/subscription_tier_base.py | 0 .../types/shared/vpc_peering.py | 0 .../types/shared_params/__init__.py | 0 .../shared_params/firewall_rule_target.py | 0 .../agents/chat/test_completions.py | 4 +- .../agents/evaluation_metrics/test_models.py | 4 +- .../evaluation_metrics/test_workspaces.py | 4 +- .../workspaces/test_agents.py | 4 +- tests/api_resources/agents/test_api_keys.py | 4 +- .../agents/test_evaluation_datasets.py | 4 +- .../agents/test_evaluation_metrics.py | 4 +- 
.../agents/test_evaluation_runs.py | 4 +- .../agents/test_evaluation_test_cases.py | 4 +- tests/api_resources/agents/test_functions.py | 4 +- .../agents/test_knowledge_bases.py | 4 +- tests/api_resources/agents/test_routes.py | 4 +- tests/api_resources/agents/test_versions.py | 4 +- tests/api_resources/chat/test_completions.py | 4 +- .../gpu_droplets/account/test_keys.py | 4 +- .../gpu_droplets/firewalls/test_droplets.py | 2 +- .../gpu_droplets/firewalls/test_rules.py | 2 +- .../gpu_droplets/firewalls/test_tags.py | 2 +- .../gpu_droplets/floating_ips/test_actions.py | 4 +- .../gpu_droplets/images/test_actions.py | 6 +- .../load_balancers/test_droplets.py | 2 +- .../load_balancers/test_forwarding_rules.py | 2 +- .../gpu_droplets/test_actions.py | 4 +- .../gpu_droplets/test_autoscale.py | 4 +- .../gpu_droplets/test_backups.py | 4 +- .../test_destroy_with_associated_resources.py | 4 +- .../gpu_droplets/test_firewalls.py | 4 +- .../gpu_droplets/test_floating_ips.py | 4 +- .../api_resources/gpu_droplets/test_images.py | 4 +- .../gpu_droplets/test_load_balancers.py | 4 +- .../api_resources/gpu_droplets/test_sizes.py | 4 +- .../gpu_droplets/test_snapshots.py | 4 +- .../gpu_droplets/test_volumes.py | 4 +- .../gpu_droplets/volumes/test_actions.py | 4 +- .../gpu_droplets/volumes/test_snapshots.py | 4 +- .../api_resources/inference/test_api_keys.py | 4 +- .../knowledge_bases/test_data_sources.py | 4 +- .../knowledge_bases/test_indexing_jobs.py | 4 +- .../models/providers/test_anthropic.py | 4 +- .../models/providers/test_openai.py | 4 +- tests/api_resources/test_agents.py | 4 +- tests/api_resources/test_gpu_droplets.py | 4 +- tests/api_resources/test_knowledge_bases.py | 4 +- tests/api_resources/test_models.py | 4 +- tests/api_resources/test_regions.py | 4 +- tests/conftest.py | 6 +- tests/test_client.py | 48 +- tests/test_deepcopy.py | 2 +- tests/test_extract_files.py | 4 +- tests/test_files.py | 2 +- tests/test_models.py | 6 +- tests/test_qs.py | 2 +- 
tests/test_required_args.py | 2 +- tests/test_response.py | 14 +- tests/test_streaming.py | 4 +- tests/test_transform.py | 8 +- tests/test_utils/test_proxy.py | 2 +- tests/test_utils/test_typing.py | 2 +- tests/utils.py | 8 +- 506 files changed, 539 insertions(+), 484 deletions(-) rename src/{gradientai => do_gradientai}/__init__.py (95%) rename src/{gradientai => do_gradientai}/_base_client.py (99%) rename src/{gradientai => do_gradientai}/_client.py (84%) rename src/{gradientai => do_gradientai}/_compat.py (100%) rename src/{gradientai => do_gradientai}/_constants.py (100%) rename src/{gradientai => do_gradientai}/_exceptions.py (100%) rename src/{gradientai => do_gradientai}/_files.py (100%) rename src/{gradientai => do_gradientai}/_models.py (100%) rename src/{gradientai => do_gradientai}/_qs.py (100%) rename src/{gradientai => do_gradientai}/_resource.py (100%) rename src/{gradientai => do_gradientai}/_response.py (99%) rename src/{gradientai => do_gradientai}/_streaming.py (100%) rename src/{gradientai => do_gradientai}/_types.py (99%) rename src/{gradientai => do_gradientai}/_utils/__init__.py (100%) rename src/{gradientai => do_gradientai}/_utils/_logs.py (75%) rename src/{gradientai => do_gradientai}/_utils/_proxy.py (100%) rename src/{gradientai => do_gradientai}/_utils/_reflection.py (100%) rename src/{gradientai => do_gradientai}/_utils/_resources_proxy.py (50%) rename src/{gradientai => do_gradientai}/_utils/_streams.py (100%) rename src/{gradientai => do_gradientai}/_utils/_sync.py (100%) rename src/{gradientai => do_gradientai}/_utils/_transform.py (100%) rename src/{gradientai => do_gradientai}/_utils/_typing.py (100%) rename src/{gradientai => do_gradientai}/_utils/_utils.py (100%) rename src/{gradientai => do_gradientai}/_version.py (100%) rename src/{gradientai => do_gradientai}/py.typed (100%) rename src/{gradientai => do_gradientai}/resources/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/__init__.py (100%) rename 
src/{gradientai => do_gradientai}/resources/agents/agents.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/api_keys.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/chat/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/chat/chat.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/chat/completions.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_datasets.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/evaluation_metrics.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/models.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/workspaces/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/workspaces/agents.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_metrics/workspaces/workspaces.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_runs.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/evaluation_test_cases.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/functions.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/knowledge_bases.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/routes.py (100%) rename src/{gradientai => do_gradientai}/resources/agents/versions.py (100%) rename src/{gradientai => do_gradientai}/resources/chat/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/chat/chat.py (100%) rename src/{gradientai => do_gradientai}/resources/chat/completions.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/account/__init__.py (100%) rename 
src/{gradientai => do_gradientai}/resources/gpu_droplets/account/account.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/account/keys.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/actions.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/autoscale.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/backups.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/destroy_with_associated_resources.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/firewalls/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/firewalls/droplets.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/firewalls/firewalls.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/firewalls/rules.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/firewalls/tags.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/floating_ips/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/floating_ips/actions.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/floating_ips/floating_ips.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/gpu_droplets.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/images/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/images/actions.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/images/images.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/load_balancers/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/load_balancers/droplets.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/load_balancers/forwarding_rules.py (100%) rename src/{gradientai => 
do_gradientai}/resources/gpu_droplets/load_balancers/load_balancers.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/sizes.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/snapshots.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/volumes/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/volumes/actions.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/volumes/snapshots.py (100%) rename src/{gradientai => do_gradientai}/resources/gpu_droplets/volumes/volumes.py (100%) rename src/{gradientai => do_gradientai}/resources/inference/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/inference/api_keys.py (100%) rename src/{gradientai => do_gradientai}/resources/inference/inference.py (100%) rename src/{gradientai => do_gradientai}/resources/knowledge_bases/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/knowledge_bases/data_sources.py (100%) rename src/{gradientai => do_gradientai}/resources/knowledge_bases/indexing_jobs.py (100%) rename src/{gradientai => do_gradientai}/resources/knowledge_bases/knowledge_bases.py (100%) rename src/{gradientai => do_gradientai}/resources/models/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/models/models.py (100%) rename src/{gradientai => do_gradientai}/resources/models/providers/__init__.py (100%) rename src/{gradientai => do_gradientai}/resources/models/providers/anthropic.py (100%) rename src/{gradientai => do_gradientai}/resources/models/providers/openai.py (100%) rename src/{gradientai => do_gradientai}/resources/models/providers/providers.py (100%) rename src/{gradientai => do_gradientai}/resources/regions.py (100%) rename src/{gradientai => do_gradientai}/types/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/agent_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agent_create_response.py (100%) 
rename src/{gradientai => do_gradientai}/types/agent_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/agent_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/agent_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agent_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/agent_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/agent_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/agent_update_status_params.py (100%) rename src/{gradientai => do_gradientai}/types/agent_update_status_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_evaluation_metric.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_evaluation_metric_result.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_evaluation_prompt.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_evaluation_run.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_evaluation_test_case.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_regenerate_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_key_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_link_knowledge_base_output.py (100%) rename src/{gradientai => 
do_gradientai}/types/agents/api_star_metric.py (100%) rename src/{gradientai => do_gradientai}/types/agents/api_star_metric_param.py (100%) rename src/{gradientai => do_gradientai}/types/agents/chat/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/agents/chat/completion_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/chat/completion_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_dataset_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_dataset_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metric_list_regions_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metric_list_regions_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metric_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/model_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/model_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py (100%) rename src/{gradientai => 
do_gradientai}/types/agents/evaluation_metrics/workspace_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspace_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspaces/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspaces/agent_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspaces/agent_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspaces/agent_move_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_metrics/workspaces/agent_move_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_list_results_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_list_results_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_run_retrieve_results_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_list_evaluation_runs_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_list_evaluation_runs_response.py (100%) rename 
src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_retrieve_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/evaluation_test_case_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/function_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/function_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/function_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/function_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/function_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/knowledge_base_detach_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/route_add_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/route_add_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/route_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/route_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/route_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/route_view_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/version_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/version_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/agents/version_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/agents/version_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/api_agent.py (100%) rename src/{gradientai => 
do_gradientai}/types/api_agent_api_key_info.py (100%) rename src/{gradientai => do_gradientai}/types/api_agent_model.py (100%) rename src/{gradientai => do_gradientai}/types/api_agreement.py (100%) rename src/{gradientai => do_gradientai}/types/api_anthropic_api_key_info.py (100%) rename src/{gradientai => do_gradientai}/types/api_deployment_visibility.py (100%) rename src/{gradientai => do_gradientai}/types/api_knowledge_base.py (100%) rename src/{gradientai => do_gradientai}/types/api_model.py (100%) rename src/{gradientai => do_gradientai}/types/api_model_version.py (100%) rename src/{gradientai => do_gradientai}/types/api_openai_api_key_info.py (100%) rename src/{gradientai => do_gradientai}/types/api_retrieval_method.py (100%) rename src/{gradientai => do_gradientai}/types/api_workspace.py (100%) rename src/{gradientai => do_gradientai}/types/chat/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/chat/completion_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/chat/completion_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/droplet_backup_policy.py (100%) rename src/{gradientai => do_gradientai}/types/droplet_backup_policy_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_delete_by_tag_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_firewalls_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_firewalls_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_kernels_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_kernels_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_neighbors_response.py (100%) rename src/{gradientai => 
do_gradientai}/types/gpu_droplet_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_snapshots_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_list_snapshots_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplet_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/account/key_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_bulk_initiate_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_bulk_initiate_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_initiate_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_initiate_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/action_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/associated_resource.py 
(100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_history_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_history_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_members_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_members_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool_droplet_template.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool_droplet_template_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool_dynamic_config.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool_dynamic_config_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool_static_config.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_pool_static_config_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/autoscale_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/backup_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/backup_list_policies_params.py (100%) rename src/{gradientai => 
do_gradientai}/types/gpu_droplets/backup_list_policies_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/backup_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/backup_list_supported_policies_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/backup_retrieve_policy_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/current_utilization.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/destroy_with_associated_resource_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/destroyed_associated_resource.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/domains.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/domains_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewall_update_response.py (100%) rename src/{gradientai => 
do_gradientai}/types/gpu_droplets/firewalls/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/droplet_add_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/droplet_remove_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/rule_add_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/rule_remove_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/tag_add_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/firewalls/tag_remove_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ip_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ips/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ips/action_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ips/action_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ips/action_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/floating_ips/action_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/forwarding_rule.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/forwarding_rule_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/glb_settings.py (100%) rename 
src/{gradientai => do_gradientai}/types/gpu_droplets/glb_settings_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/health_check.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/health_check_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/image_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/images/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/images/action_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/images/action_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/lb_firewall.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/lb_firewall_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_retrieve_response.py (100%) rename src/{gradientai => 
do_gradientai}/types/gpu_droplets/load_balancer_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancer_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancers/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancers/droplet_add_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancers/droplet_remove_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/size_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/size_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/snapshot_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/snapshot_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/snapshot_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/sticky_sessions.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/sticky_sessions_param.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_delete_by_name_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volume_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/__init__.py (100%) rename 
src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_initiate_by_id_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_initiate_by_id_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_initiate_by_name_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_initiate_by_name_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_retrieve_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/action_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/snapshot_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/snapshot_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/snapshot_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/snapshot_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/snapshot_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/gpu_droplets/volumes/volume_action.py (100%) rename src/{gradientai => do_gradientai}/types/inference/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/inference/api_key_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/inference/api_key_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/inference/api_key_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/inference/api_key_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/inference/api_key_list_response.py (100%) rename src/{gradientai => 
do_gradientai}/types/inference/api_key_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/inference/api_key_update_regenerate_response.py (100%) rename src/{gradientai => do_gradientai}/types/inference/api_key_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/inference/api_model_api_key_info.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_base_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_base_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_base_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_base_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_base_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_base_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_base_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_base_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_file_upload_data_source.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_file_upload_data_source_param.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_indexed_data_source.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_indexing_job.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_knowledge_base_data_source.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_spaces_data_source.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_spaces_data_source_param.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/api_web_crawler_data_source.py (100%) rename src/{gradientai => 
do_gradientai}/types/knowledge_bases/api_web_crawler_data_source_param.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/aws_data_source_param.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/data_source_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_update_cancel_params.py (100%) rename src/{gradientai => do_gradientai}/types/knowledge_bases/indexing_job_update_cancel_response.py (100%) rename src/{gradientai => do_gradientai}/types/model_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/model_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_create_params.py (100%) rename src/{gradientai => 
do_gradientai}/types/models/providers/anthropic_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_list_agents_params.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_list_agents_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/anthropic_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_create_params.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_create_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_delete_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_list_params.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_retrieve_agents_params.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_retrieve_agents_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_retrieve_response.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_update_params.py (100%) rename src/{gradientai => do_gradientai}/types/models/providers/openai_update_response.py (100%) rename src/{gradientai => do_gradientai}/types/region_list_params.py (100%) rename src/{gradientai => 
do_gradientai}/types/region_list_response.py (100%) rename src/{gradientai => do_gradientai}/types/shared/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/shared/action.py (100%) rename src/{gradientai => do_gradientai}/types/shared/action_link.py (100%) rename src/{gradientai => do_gradientai}/types/shared/api_links.py (100%) rename src/{gradientai => do_gradientai}/types/shared/api_meta.py (100%) rename src/{gradientai => do_gradientai}/types/shared/backward_links.py (100%) rename src/{gradientai => do_gradientai}/types/shared/chat_completion_chunk.py (100%) rename src/{gradientai => do_gradientai}/types/shared/chat_completion_token_logprob.py (100%) rename src/{gradientai => do_gradientai}/types/shared/completion_usage.py (100%) rename src/{gradientai => do_gradientai}/types/shared/disk_info.py (100%) rename src/{gradientai => do_gradientai}/types/shared/droplet.py (100%) rename src/{gradientai => do_gradientai}/types/shared/droplet_next_backup_window.py (100%) rename src/{gradientai => do_gradientai}/types/shared/firewall_rule_target.py (100%) rename src/{gradientai => do_gradientai}/types/shared/forward_links.py (100%) rename src/{gradientai => do_gradientai}/types/shared/garbage_collection.py (100%) rename src/{gradientai => do_gradientai}/types/shared/gpu_info.py (100%) rename src/{gradientai => do_gradientai}/types/shared/image.py (100%) rename src/{gradientai => do_gradientai}/types/shared/kernel.py (100%) rename src/{gradientai => do_gradientai}/types/shared/meta_properties.py (100%) rename src/{gradientai => do_gradientai}/types/shared/network_v4.py (100%) rename src/{gradientai => do_gradientai}/types/shared/network_v6.py (100%) rename src/{gradientai => do_gradientai}/types/shared/page_links.py (100%) rename src/{gradientai => do_gradientai}/types/shared/region.py (100%) rename src/{gradientai => do_gradientai}/types/shared/size.py (100%) rename src/{gradientai => do_gradientai}/types/shared/snapshots.py (100%) rename src/{gradientai 
=> do_gradientai}/types/shared/subscription.py (100%) rename src/{gradientai => do_gradientai}/types/shared/subscription_tier_base.py (100%) rename src/{gradientai => do_gradientai}/types/shared/vpc_peering.py (100%) rename src/{gradientai => do_gradientai}/types/shared_params/__init__.py (100%) rename src/{gradientai => do_gradientai}/types/shared_params/firewall_rule_target.py (100%) diff --git a/.stats.yml b/.stats.yml index 5f9d16dd..c001b5ff 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 168 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradientai-f8e8c290636c1e218efcf7bfe92ba7570c11690754d21287d838919fbc943a80.yml openapi_spec_hash: 1eddf488ecbe415efb45445697716f5d -config_hash: 683ea6ba4d63037c1c72484e5936e73c +config_hash: 0a72b6161859b504ed3b5a2a142ba5a5 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 086907ef..4f59c83a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -36,7 +36,7 @@ $ pip install -r requirements-dev.lock Most of the SDK is generated code. Modifications to code will be persisted between generations, but may result in merge conflicts between manual patches and changes from the generator. The generator will never -modify the contents of the `src/gradientai/lib/` and `examples/` directories. +modify the contents of the `src/do_gradientai/lib/` and `examples/` directories. ## Adding and running examples diff --git a/README.md b/README.md index 67ca8b6c..bebfbb0e 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ The full API of this library can be found in [api.md](api.md). 
```python import os -from gradientai import GradientAI +from do_gradientai import GradientAI api_client = GradientAI( api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted @@ -38,6 +38,7 @@ inference_client = GradientAI( ) agent_client = GradientAI( agent_key=os.environ.get("GRADIENTAI_AGENT_KEY"), # This is the default and can be omitted + agent_endpoint="https://my-cool-agent.agents.do-ai.run", ) print(api_client.agents.list()) @@ -67,7 +68,7 @@ Simply import `AsyncGradientAI` instead of `GradientAI` and use `await` with eac ```python import os import asyncio -from gradientai import AsyncGradientAI +from do_gradientai import AsyncGradientAI client = AsyncGradientAI( api_key=os.environ.get("GRADIENTAI_API_KEY"), # This is the default and can be omitted @@ -107,8 +108,8 @@ Then you can enable it by instantiating the client with `http_client=DefaultAioH ```python import asyncio -from gradientai import DefaultAioHttpClient -from gradientai import AsyncGradientAI +from do_gradientai import DefaultAioHttpClient +from do_gradientai import AsyncGradientAI async def main() -> None: @@ -136,7 +137,7 @@ asyncio.run(main()) We provide support for streaming responses using Server Side Events (SSE). ```python -from gradientai import GradientAI +from do_gradientai import GradientAI client = GradientAI() @@ -157,7 +158,7 @@ for completion in stream: The async client uses the exact same interface. 
```python -from gradientai import AsyncGradientAI +from do_gradientai import AsyncGradientAI client = AsyncGradientAI() @@ -189,7 +190,7 @@ Typed requests and responses provide autocomplete and documentation within your Nested parameters are dictionaries, typed using `TypedDict`, for example: ```python -from gradientai import GradientAI +from do_gradientai import GradientAI client = GradientAI() @@ -208,16 +209,16 @@ print(completion.stream_options) ## Handling errors -When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `gradientai.APIConnectionError` is raised. +When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `do_gradientai.APIConnectionError` is raised. When the API returns a non-success status code (that is, 4xx or 5xx -response), a subclass of `gradientai.APIStatusError` is raised, containing `status_code` and `response` properties. +response), a subclass of `do_gradientai.APIStatusError` is raised, containing `status_code` and `response` properties. -All errors inherit from `gradientai.APIError`. +All errors inherit from `do_gradientai.APIError`. ```python -import gradientai -from gradientai import GradientAI +import do_gradientai +from do_gradientai import GradientAI client = GradientAI() @@ -231,12 +232,12 @@ try: ], model="llama3.3-70b-instruct", ) -except gradientai.APIConnectionError as e: +except do_gradientai.APIConnectionError as e: print("The server could not be reached") print(e.__cause__) # an underlying Exception, likely raised within httpx. 
-except gradientai.RateLimitError as e: +except do_gradientai.RateLimitError as e: print("A 429 status code was received; we should back off a bit.") -except gradientai.APIStatusError as e: +except do_gradientai.APIStatusError as e: print("Another non-200-range status code was received") print(e.status_code) print(e.response) @@ -264,7 +265,7 @@ Connection errors (for example, due to a network connectivity problem), 408 Requ You can use the `max_retries` option to configure or disable retry settings: ```python -from gradientai import GradientAI +from do_gradientai import GradientAI # Configure the default for all requests: client = GradientAI( @@ -290,7 +291,7 @@ By default requests time out after 1 minute. You can configure this with a `time which accepts a float or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/timeouts/#fine-tuning-the-configuration) object: ```python -from gradientai import GradientAI +from do_gradientai import GradientAI # Configure the default for all requests: client = GradientAI( @@ -350,7 +351,7 @@ if response.my_field is None: The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g., ```py -from gradientai import GradientAI +from do_gradientai import GradientAI client = GradientAI() response = client.chat.completions.with_raw_response.create( @@ -366,9 +367,9 @@ completion = response.parse() # get the object that `chat.completions.create()` print(completion.choices) ``` -These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) object. +These methods return an [`APIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/do_gradientai/_response.py) object. 
-The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. +The async client returns an [`AsyncAPIResponse`](https://github.com/digitalocean/gradientai-python/tree/main/src/do_gradientai/_response.py) with the same structure, the only difference being `await`able methods for reading the response content. #### `.with_streaming_response` @@ -438,7 +439,7 @@ You can directly override the [httpx client](https://www.python-httpx.org/api/#c ```python import httpx -from gradientai import GradientAI, DefaultHttpxClient +from do_gradientai import GradientAI, DefaultHttpxClient client = GradientAI( # Or use the `GRADIENT_AI_BASE_URL` env var @@ -461,7 +462,7 @@ client.with_options(http_client=DefaultHttpxClient(...)) By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting. 
```py -from gradientai import GradientAI +from do_gradientai import GradientAI with GradientAI() as client: # make requests here @@ -489,8 +490,8 @@ If you've upgraded to the latest version but aren't seeing any new features you You can determine the version that is being used at runtime with: ```py -import gradientai -print(gradientai.__version__) +import do_gradientai +print(do_gradientai.__version__) ``` ## Requirements diff --git a/api.md b/api.md index 8682940b..fa4e0edb 100644 --- a/api.md +++ b/api.md @@ -1,7 +1,7 @@ # Shared Types ```python -from gradientai.types import ( +from do_gradientai.types import ( Action, ActionLink, APILinks, @@ -37,7 +37,7 @@ from gradientai.types import ( Types: ```python -from gradientai.types import ( +from do_gradientai.types import ( APIAgent, APIAgentAPIKeyInfo, APIAgentModel, @@ -57,19 +57,19 @@ from gradientai.types import ( Methods: -- client.agents.create(\*\*params) -> AgentCreateResponse -- client.agents.retrieve(uuid) -> AgentRetrieveResponse -- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse -- client.agents.list(\*\*params) -> AgentListResponse -- client.agents.delete(uuid) -> AgentDeleteResponse -- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse +- client.agents.create(\*\*params) -> AgentCreateResponse +- client.agents.retrieve(uuid) -> AgentRetrieveResponse +- client.agents.update(path_uuid, \*\*params) -> AgentUpdateResponse +- client.agents.list(\*\*params) -> AgentListResponse +- client.agents.delete(uuid) -> AgentDeleteResponse +- client.agents.update_status(path_uuid, \*\*params) -> AgentUpdateStatusResponse ## APIKeys Types: ```python -from gradientai.types.agents import ( +from do_gradientai.types.agents import ( APIKeyCreateResponse, APIKeyUpdateResponse, APIKeyListResponse, @@ -80,11 +80,11 @@ from gradientai.types.agents import ( Methods: -- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse -- 
client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse -- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse -- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse -- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse +- client.agents.api_keys.create(path_agent_uuid, \*\*params) -> APIKeyCreateResponse +- client.agents.api_keys.update(path_api_key_uuid, \*, path_agent_uuid, \*\*params) -> APIKeyUpdateResponse +- client.agents.api_keys.list(agent_uuid, \*\*params) -> APIKeyListResponse +- client.agents.api_keys.delete(api_key_uuid, \*, agent_uuid) -> APIKeyDeleteResponse +- client.agents.api_keys.regenerate(api_key_uuid, \*, agent_uuid) -> APIKeyRegenerateResponse ## Chat @@ -93,19 +93,19 @@ Methods: Types: ```python -from gradientai.types.agents.chat import CompletionCreateResponse +from do_gradientai.types.agents.chat import CompletionCreateResponse ``` Methods: -- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse +- client.agents.chat.completions.create(\*\*params) -> CompletionCreateResponse ## EvaluationMetrics Types: ```python -from gradientai.types.agents import ( +from do_gradientai.types.agents import ( EvaluationMetricListResponse, EvaluationMetricListRegionsResponse, ) @@ -113,15 +113,15 @@ from gradientai.types.agents import ( Methods: -- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse -- client.agents.evaluation_metrics.list_regions(\*\*params) -> EvaluationMetricListRegionsResponse +- client.agents.evaluation_metrics.list() -> EvaluationMetricListResponse +- client.agents.evaluation_metrics.list_regions(\*\*params) -> EvaluationMetricListRegionsResponse ### Workspaces Types: ```python -from gradientai.types.agents.evaluation_metrics import ( +from do_gradientai.types.agents.evaluation_metrics import ( WorkspaceCreateResponse, WorkspaceRetrieveResponse, 
WorkspaceUpdateResponse, @@ -133,19 +133,19 @@ from gradientai.types.agents.evaluation_metrics import ( Methods: -- client.agents.evaluation_metrics.workspaces.create(\*\*params) -> WorkspaceCreateResponse -- client.agents.evaluation_metrics.workspaces.retrieve(workspace_uuid) -> WorkspaceRetrieveResponse -- client.agents.evaluation_metrics.workspaces.update(path_workspace_uuid, \*\*params) -> WorkspaceUpdateResponse -- client.agents.evaluation_metrics.workspaces.list() -> WorkspaceListResponse -- client.agents.evaluation_metrics.workspaces.delete(workspace_uuid) -> WorkspaceDeleteResponse -- client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(workspace_uuid) -> WorkspaceListEvaluationTestCasesResponse +- client.agents.evaluation_metrics.workspaces.create(\*\*params) -> WorkspaceCreateResponse +- client.agents.evaluation_metrics.workspaces.retrieve(workspace_uuid) -> WorkspaceRetrieveResponse +- client.agents.evaluation_metrics.workspaces.update(path_workspace_uuid, \*\*params) -> WorkspaceUpdateResponse +- client.agents.evaluation_metrics.workspaces.list() -> WorkspaceListResponse +- client.agents.evaluation_metrics.workspaces.delete(workspace_uuid) -> WorkspaceDeleteResponse +- client.agents.evaluation_metrics.workspaces.list_evaluation_test_cases(workspace_uuid) -> WorkspaceListEvaluationTestCasesResponse #### Agents Types: ```python -from gradientai.types.agents.evaluation_metrics.workspaces import ( +from do_gradientai.types.agents.evaluation_metrics.workspaces import ( AgentListResponse, AgentMoveResponse, ) @@ -153,27 +153,27 @@ from gradientai.types.agents.evaluation_metrics.workspaces import ( Methods: -- client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse -- client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse +- client.agents.evaluation_metrics.workspaces.agents.list(workspace_uuid, \*\*params) -> AgentListResponse +- 
client.agents.evaluation_metrics.workspaces.agents.move(path_workspace_uuid, \*\*params) -> AgentMoveResponse ### Models Types: ```python -from gradientai.types.agents.evaluation_metrics import ModelListResponse +from do_gradientai.types.agents.evaluation_metrics import ModelListResponse ``` Methods: -- client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse +- client.agents.evaluation_metrics.models.list(\*\*params) -> ModelListResponse ## EvaluationRuns Types: ```python -from gradientai.types.agents import ( +from do_gradientai.types.agents import ( APIEvaluationMetric, APIEvaluationMetricResult, APIEvaluationPrompt, @@ -187,17 +187,17 @@ from gradientai.types.agents import ( Methods: -- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse -- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse -- client.agents.evaluation_runs.list_results(evaluation_run_uuid, \*\*params) -> EvaluationRunListResultsResponse -- client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse +- client.agents.evaluation_runs.create(\*\*params) -> EvaluationRunCreateResponse +- client.agents.evaluation_runs.retrieve(evaluation_run_uuid) -> EvaluationRunRetrieveResponse +- client.agents.evaluation_runs.list_results(evaluation_run_uuid, \*\*params) -> EvaluationRunListResultsResponse +- client.agents.evaluation_runs.retrieve_results(prompt_id, \*, evaluation_run_uuid) -> EvaluationRunRetrieveResultsResponse ## EvaluationTestCases Types: ```python -from gradientai.types.agents import ( +from do_gradientai.types.agents import ( APIEvaluationTestCase, APIStarMetric, EvaluationTestCaseCreateResponse, @@ -210,18 +210,18 @@ from gradientai.types.agents import ( Methods: -- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse -- client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> 
EvaluationTestCaseRetrieveResponse -- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse -- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse -- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse +- client.agents.evaluation_test_cases.create(\*\*params) -> EvaluationTestCaseCreateResponse +- client.agents.evaluation_test_cases.retrieve(test_case_uuid, \*\*params) -> EvaluationTestCaseRetrieveResponse +- client.agents.evaluation_test_cases.update(path_test_case_uuid, \*\*params) -> EvaluationTestCaseUpdateResponse +- client.agents.evaluation_test_cases.list() -> EvaluationTestCaseListResponse +- client.agents.evaluation_test_cases.list_evaluation_runs(evaluation_test_case_uuid, \*\*params) -> EvaluationTestCaseListEvaluationRunsResponse ## EvaluationDatasets Types: ```python -from gradientai.types.agents import ( +from do_gradientai.types.agents import ( EvaluationDatasetCreateResponse, EvaluationDatasetCreateFileUploadPresignedURLsResponse, ) @@ -229,15 +229,15 @@ from gradientai.types.agents import ( Methods: -- client.agents.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse -- client.agents.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse +- client.agents.evaluation_datasets.create(\*\*params) -> EvaluationDatasetCreateResponse +- client.agents.evaluation_datasets.create_file_upload_presigned_urls(\*\*params) -> EvaluationDatasetCreateFileUploadPresignedURLsResponse ## Functions Types: ```python -from gradientai.types.agents import ( +from do_gradientai.types.agents import ( FunctionCreateResponse, FunctionUpdateResponse, FunctionDeleteResponse, @@ -246,43 +246,43 @@ from gradientai.types.agents import ( Methods: -- client.agents.functions.create(path_agent_uuid, \*\*params) -> 
FunctionCreateResponse -- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse -- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse +- client.agents.functions.create(path_agent_uuid, \*\*params) -> FunctionCreateResponse +- client.agents.functions.update(path_function_uuid, \*, path_agent_uuid, \*\*params) -> FunctionUpdateResponse +- client.agents.functions.delete(function_uuid, \*, agent_uuid) -> FunctionDeleteResponse ## Versions Types: ```python -from gradientai.types.agents import VersionUpdateResponse, VersionListResponse +from do_gradientai.types.agents import VersionUpdateResponse, VersionListResponse ``` Methods: -- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse -- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse +- client.agents.versions.update(path_uuid, \*\*params) -> VersionUpdateResponse +- client.agents.versions.list(uuid, \*\*params) -> VersionListResponse ## KnowledgeBases Types: ```python -from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse +from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse ``` Methods: -- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput -- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput -- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse +- client.agents.knowledge_bases.attach(agent_uuid) -> APILinkKnowledgeBaseOutput +- client.agents.knowledge_bases.attach_single(knowledge_base_uuid, \*, agent_uuid) -> APILinkKnowledgeBaseOutput +- client.agents.knowledge_bases.detach(knowledge_base_uuid, \*, agent_uuid) -> KnowledgeBaseDetachResponse ## Routes Types: ```python -from gradientai.types.agents import ( +from do_gradientai.types.agents import ( 
RouteUpdateResponse, RouteDeleteResponse, RouteAddResponse, @@ -292,10 +292,10 @@ from gradientai.types.agents import ( Methods: -- client.agents.routes.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteUpdateResponse -- client.agents.routes.delete(child_agent_uuid, \*, parent_agent_uuid) -> RouteDeleteResponse -- client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse -- client.agents.routes.view(uuid) -> RouteViewResponse +- client.agents.routes.update(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteUpdateResponse +- client.agents.routes.delete(child_agent_uuid, \*, parent_agent_uuid) -> RouteDeleteResponse +- client.agents.routes.add(path_child_agent_uuid, \*, path_parent_agent_uuid, \*\*params) -> RouteAddResponse +- client.agents.routes.view(uuid) -> RouteViewResponse # Chat @@ -304,31 +304,31 @@ Methods: Types: ```python -from gradientai.types.chat import CompletionCreateResponse +from do_gradientai.types.chat import CompletionCreateResponse ``` Methods: -- client.chat.completions.create(\*\*params) -> CompletionCreateResponse +- client.chat.completions.create(\*\*params) -> CompletionCreateResponse # Regions Types: ```python -from gradientai.types import RegionListResponse +from do_gradientai.types import RegionListResponse ``` Methods: -- client.regions.list(\*\*params) -> RegionListResponse +- client.regions.list(\*\*params) -> RegionListResponse # KnowledgeBases Types: ```python -from gradientai.types import ( +from do_gradientai.types import ( APIKnowledgeBase, KnowledgeBaseCreateResponse, KnowledgeBaseRetrieveResponse, @@ -340,18 +340,18 @@ from gradientai.types import ( Methods: -- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse -- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse -- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse -- 
client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse -- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse +- client.knowledge_bases.create(\*\*params) -> KnowledgeBaseCreateResponse +- client.knowledge_bases.retrieve(uuid) -> KnowledgeBaseRetrieveResponse +- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse +- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse +- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse ## DataSources Types: ```python -from gradientai.types.knowledge_bases import ( +from do_gradientai.types.knowledge_bases import ( APIFileUploadDataSource, APIKnowledgeBaseDataSource, APISpacesDataSource, @@ -365,16 +365,16 @@ from gradientai.types.knowledge_bases import ( Methods: -- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse -- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse -- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse +- client.knowledge_bases.data_sources.create(path_knowledge_base_uuid, \*\*params) -> DataSourceCreateResponse +- client.knowledge_bases.data_sources.list(knowledge_base_uuid, \*\*params) -> DataSourceListResponse +- client.knowledge_bases.data_sources.delete(data_source_uuid, \*, knowledge_base_uuid) -> DataSourceDeleteResponse ## IndexingJobs Types: ```python -from gradientai.types.knowledge_bases import ( +from do_gradientai.types.knowledge_bases import ( APIIndexedDataSource, APIIndexingJob, IndexingJobCreateResponse, @@ -387,11 +387,11 @@ from gradientai.types.knowledge_bases import ( Methods: -- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse -- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse -- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse 
-- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse -- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse +- client.knowledge_bases.indexing_jobs.create(\*\*params) -> IndexingJobCreateResponse +- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse +- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse +- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse +- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse # Inference @@ -400,7 +400,7 @@ Methods: Types: ```python -from gradientai.types.inference import ( +from do_gradientai.types.inference import ( APIModelAPIKeyInfo, APIKeyCreateResponse, APIKeyUpdateResponse, @@ -412,18 +412,18 @@ from gradientai.types.inference import ( Methods: -- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse -- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse -- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse -- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse -- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse +- client.inference.api_keys.create(\*\*params) -> APIKeyCreateResponse +- client.inference.api_keys.update(path_api_key_uuid, \*\*params) -> APIKeyUpdateResponse +- client.inference.api_keys.list(\*\*params) -> APIKeyListResponse +- client.inference.api_keys.delete(api_key_uuid) -> APIKeyDeleteResponse +- client.inference.api_keys.update_regenerate(api_key_uuid) -> APIKeyUpdateRegenerateResponse # Models Types: ```python -from gradientai.types import ( +from do_gradientai.types import ( APIAgreement, APIModel, APIModelVersion, @@ -434,8 +434,8 @@ from gradientai.types import ( 
Methods: -- client.models.retrieve(model) -> ModelRetrieveResponse -- client.models.list() -> ModelListResponse +- client.models.retrieve(model) -> ModelRetrieveResponse +- client.models.list() -> ModelListResponse ## Providers @@ -444,7 +444,7 @@ Methods: Types: ```python -from gradientai.types.models.providers import ( +from do_gradientai.types.models.providers import ( AnthropicCreateResponse, AnthropicRetrieveResponse, AnthropicUpdateResponse, @@ -456,19 +456,19 @@ from gradientai.types.models.providers import ( Methods: -- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse -- client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse -- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse -- client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse -- client.models.providers.anthropic.delete(api_key_uuid) -> AnthropicDeleteResponse -- client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse +- client.models.providers.anthropic.create(\*\*params) -> AnthropicCreateResponse +- client.models.providers.anthropic.retrieve(api_key_uuid) -> AnthropicRetrieveResponse +- client.models.providers.anthropic.update(path_api_key_uuid, \*\*params) -> AnthropicUpdateResponse +- client.models.providers.anthropic.list(\*\*params) -> AnthropicListResponse +- client.models.providers.anthropic.delete(api_key_uuid) -> AnthropicDeleteResponse +- client.models.providers.anthropic.list_agents(uuid, \*\*params) -> AnthropicListAgentsResponse ### OpenAI Types: ```python -from gradientai.types.models.providers import ( +from do_gradientai.types.models.providers import ( OpenAICreateResponse, OpenAIRetrieveResponse, OpenAIUpdateResponse, @@ -480,19 +480,19 @@ from gradientai.types.models.providers import ( Methods: -- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse -- 
client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse -- client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse -- client.models.providers.openai.list(\*\*params) -> OpenAIListResponse -- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse -- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse +- client.models.providers.openai.create(\*\*params) -> OpenAICreateResponse +- client.models.providers.openai.retrieve(api_key_uuid) -> OpenAIRetrieveResponse +- client.models.providers.openai.update(path_api_key_uuid, \*\*params) -> OpenAIUpdateResponse +- client.models.providers.openai.list(\*\*params) -> OpenAIListResponse +- client.models.providers.openai.delete(api_key_uuid) -> OpenAIDeleteResponse +- client.models.providers.openai.retrieve_agents(uuid, \*\*params) -> OpenAIRetrieveAgentsResponse # GPUDroplets Types: ```python -from gradientai.types import ( +from do_gradientai.types import ( DropletBackupPolicy, GPUDropletCreateResponse, GPUDropletRetrieveResponse, @@ -506,22 +506,22 @@ from gradientai.types import ( Methods: -- client.gpu_droplets.create(\*\*params) -> GPUDropletCreateResponse -- client.gpu_droplets.retrieve(droplet_id) -> GPUDropletRetrieveResponse -- client.gpu_droplets.list(\*\*params) -> GPUDropletListResponse -- client.gpu_droplets.delete(droplet_id) -> None -- client.gpu_droplets.delete_by_tag(\*\*params) -> None -- client.gpu_droplets.list_firewalls(droplet_id, \*\*params) -> GPUDropletListFirewallsResponse -- client.gpu_droplets.list_kernels(droplet_id, \*\*params) -> GPUDropletListKernelsResponse -- client.gpu_droplets.list_neighbors(droplet_id) -> GPUDropletListNeighborsResponse -- client.gpu_droplets.list_snapshots(droplet_id, \*\*params) -> GPUDropletListSnapshotsResponse +- client.gpu_droplets.create(\*\*params) -> GPUDropletCreateResponse +- client.gpu_droplets.retrieve(droplet_id) -> 
GPUDropletRetrieveResponse +- client.gpu_droplets.list(\*\*params) -> GPUDropletListResponse +- client.gpu_droplets.delete(droplet_id) -> None +- client.gpu_droplets.delete_by_tag(\*\*params) -> None +- client.gpu_droplets.list_firewalls(droplet_id, \*\*params) -> GPUDropletListFirewallsResponse +- client.gpu_droplets.list_kernels(droplet_id, \*\*params) -> GPUDropletListKernelsResponse +- client.gpu_droplets.list_neighbors(droplet_id) -> GPUDropletListNeighborsResponse +- client.gpu_droplets.list_snapshots(droplet_id, \*\*params) -> GPUDropletListSnapshotsResponse ## Backups Types: ```python -from gradientai.types.gpu_droplets import ( +from do_gradientai.types.gpu_droplets import ( BackupListResponse, BackupListPoliciesResponse, BackupListSupportedPoliciesResponse, @@ -531,17 +531,17 @@ from gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.backups.list(droplet_id, \*\*params) -> BackupListResponse -- client.gpu_droplets.backups.list_policies(\*\*params) -> BackupListPoliciesResponse -- client.gpu_droplets.backups.list_supported_policies() -> BackupListSupportedPoliciesResponse -- client.gpu_droplets.backups.retrieve_policy(droplet_id) -> BackupRetrievePolicyResponse +- client.gpu_droplets.backups.list(droplet_id, \*\*params) -> BackupListResponse +- client.gpu_droplets.backups.list_policies(\*\*params) -> BackupListPoliciesResponse +- client.gpu_droplets.backups.list_supported_policies() -> BackupListSupportedPoliciesResponse +- client.gpu_droplets.backups.retrieve_policy(droplet_id) -> BackupRetrievePolicyResponse ## Actions Types: ```python -from gradientai.types.gpu_droplets import ( +from do_gradientai.types.gpu_droplets import ( ActionRetrieveResponse, ActionListResponse, ActionBulkInitiateResponse, @@ -551,17 +551,17 @@ from gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.actions.retrieve(action_id, \*, droplet_id) -> ActionRetrieveResponse -- client.gpu_droplets.actions.list(droplet_id, \*\*params) -> 
ActionListResponse -- client.gpu_droplets.actions.bulk_initiate(\*\*params) -> ActionBulkInitiateResponse -- client.gpu_droplets.actions.initiate(droplet_id, \*\*params) -> ActionInitiateResponse +- client.gpu_droplets.actions.retrieve(action_id, \*, droplet_id) -> ActionRetrieveResponse +- client.gpu_droplets.actions.list(droplet_id, \*\*params) -> ActionListResponse +- client.gpu_droplets.actions.bulk_initiate(\*\*params) -> ActionBulkInitiateResponse +- client.gpu_droplets.actions.initiate(droplet_id, \*\*params) -> ActionInitiateResponse ## DestroyWithAssociatedResources Types: ```python -from gradientai.types.gpu_droplets import ( +from do_gradientai.types.gpu_droplets import ( AssociatedResource, DestroyedAssociatedResource, DestroyWithAssociatedResourceListResponse, @@ -571,18 +571,18 @@ from gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.destroy_with_associated_resources.list(droplet_id) -> DestroyWithAssociatedResourceListResponse -- client.gpu_droplets.destroy_with_associated_resources.check_status(droplet_id) -> DestroyWithAssociatedResourceCheckStatusResponse -- client.gpu_droplets.destroy_with_associated_resources.delete_dangerous(droplet_id) -> None -- client.gpu_droplets.destroy_with_associated_resources.delete_selective(droplet_id, \*\*params) -> None -- client.gpu_droplets.destroy_with_associated_resources.retry(droplet_id) -> None +- client.gpu_droplets.destroy_with_associated_resources.list(droplet_id) -> DestroyWithAssociatedResourceListResponse +- client.gpu_droplets.destroy_with_associated_resources.check_status(droplet_id) -> DestroyWithAssociatedResourceCheckStatusResponse +- client.gpu_droplets.destroy_with_associated_resources.delete_dangerous(droplet_id) -> None +- client.gpu_droplets.destroy_with_associated_resources.delete_selective(droplet_id, \*\*params) -> None +- client.gpu_droplets.destroy_with_associated_resources.retry(droplet_id) -> None ## Autoscale Types: ```python -from gradientai.types.gpu_droplets 
import ( +from do_gradientai.types.gpu_droplets import ( AutoscalePool, AutoscalePoolDropletTemplate, AutoscalePoolDynamicConfig, @@ -599,21 +599,21 @@ from gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.autoscale.create(\*\*params) -> AutoscaleCreateResponse -- client.gpu_droplets.autoscale.retrieve(autoscale_pool_id) -> AutoscaleRetrieveResponse -- client.gpu_droplets.autoscale.update(autoscale_pool_id, \*\*params) -> AutoscaleUpdateResponse -- client.gpu_droplets.autoscale.list(\*\*params) -> AutoscaleListResponse -- client.gpu_droplets.autoscale.delete(autoscale_pool_id) -> None -- client.gpu_droplets.autoscale.delete_dangerous(autoscale_pool_id) -> None -- client.gpu_droplets.autoscale.list_history(autoscale_pool_id, \*\*params) -> AutoscaleListHistoryResponse -- client.gpu_droplets.autoscale.list_members(autoscale_pool_id, \*\*params) -> AutoscaleListMembersResponse +- client.gpu_droplets.autoscale.create(\*\*params) -> AutoscaleCreateResponse +- client.gpu_droplets.autoscale.retrieve(autoscale_pool_id) -> AutoscaleRetrieveResponse +- client.gpu_droplets.autoscale.update(autoscale_pool_id, \*\*params) -> AutoscaleUpdateResponse +- client.gpu_droplets.autoscale.list(\*\*params) -> AutoscaleListResponse +- client.gpu_droplets.autoscale.delete(autoscale_pool_id) -> None +- client.gpu_droplets.autoscale.delete_dangerous(autoscale_pool_id) -> None +- client.gpu_droplets.autoscale.list_history(autoscale_pool_id, \*\*params) -> AutoscaleListHistoryResponse +- client.gpu_droplets.autoscale.list_members(autoscale_pool_id, \*\*params) -> AutoscaleListMembersResponse ## Firewalls Types: ```python -from gradientai.types.gpu_droplets import ( +from do_gradientai.types.gpu_droplets import ( Firewall, FirewallCreateResponse, FirewallRetrieveResponse, @@ -624,39 +624,39 @@ from gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.firewalls.create(\*\*params) -> FirewallCreateResponse -- 
client.gpu_droplets.firewalls.retrieve(firewall_id) -> FirewallRetrieveResponse -- client.gpu_droplets.firewalls.update(firewall_id, \*\*params) -> FirewallUpdateResponse -- client.gpu_droplets.firewalls.list(\*\*params) -> FirewallListResponse -- client.gpu_droplets.firewalls.delete(firewall_id) -> None +- client.gpu_droplets.firewalls.create(\*\*params) -> FirewallCreateResponse +- client.gpu_droplets.firewalls.retrieve(firewall_id) -> FirewallRetrieveResponse +- client.gpu_droplets.firewalls.update(firewall_id, \*\*params) -> FirewallUpdateResponse +- client.gpu_droplets.firewalls.list(\*\*params) -> FirewallListResponse +- client.gpu_droplets.firewalls.delete(firewall_id) -> None ### Droplets Methods: -- client.gpu_droplets.firewalls.droplets.add(firewall_id, \*\*params) -> None -- client.gpu_droplets.firewalls.droplets.remove(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.droplets.add(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.droplets.remove(firewall_id, \*\*params) -> None ### Tags Methods: -- client.gpu_droplets.firewalls.tags.add(firewall_id, \*\*params) -> None -- client.gpu_droplets.firewalls.tags.remove(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.tags.add(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.tags.remove(firewall_id, \*\*params) -> None ### Rules Methods: -- client.gpu_droplets.firewalls.rules.add(firewall_id, \*\*params) -> None -- client.gpu_droplets.firewalls.rules.remove(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.rules.add(firewall_id, \*\*params) -> None +- client.gpu_droplets.firewalls.rules.remove(firewall_id, \*\*params) -> None ## FloatingIPs Types: ```python -from gradientai.types.gpu_droplets import ( +from do_gradientai.types.gpu_droplets import ( FloatingIP, FloatingIPCreateResponse, FloatingIPRetrieveResponse, @@ -666,17 +666,17 @@ from gradientai.types.gpu_droplets import ( Methods: -- 
client.gpu_droplets.floating_ips.create(\*\*params) -> FloatingIPCreateResponse -- client.gpu_droplets.floating_ips.retrieve(floating_ip) -> FloatingIPRetrieveResponse -- client.gpu_droplets.floating_ips.list(\*\*params) -> FloatingIPListResponse -- client.gpu_droplets.floating_ips.delete(floating_ip) -> None +- client.gpu_droplets.floating_ips.create(\*\*params) -> FloatingIPCreateResponse +- client.gpu_droplets.floating_ips.retrieve(floating_ip) -> FloatingIPRetrieveResponse +- client.gpu_droplets.floating_ips.list(\*\*params) -> FloatingIPListResponse +- client.gpu_droplets.floating_ips.delete(floating_ip) -> None ### Actions Types: ```python -from gradientai.types.gpu_droplets.floating_ips import ( +from do_gradientai.types.gpu_droplets.floating_ips import ( ActionCreateResponse, ActionRetrieveResponse, ActionListResponse, @@ -685,16 +685,16 @@ from gradientai.types.gpu_droplets.floating_ips import ( Methods: -- client.gpu_droplets.floating_ips.actions.create(floating_ip, \*\*params) -> ActionCreateResponse -- client.gpu_droplets.floating_ips.actions.retrieve(action_id, \*, floating_ip) -> ActionRetrieveResponse -- client.gpu_droplets.floating_ips.actions.list(floating_ip) -> ActionListResponse +- client.gpu_droplets.floating_ips.actions.create(floating_ip, \*\*params) -> ActionCreateResponse +- client.gpu_droplets.floating_ips.actions.retrieve(action_id, \*, floating_ip) -> ActionRetrieveResponse +- client.gpu_droplets.floating_ips.actions.list(floating_ip) -> ActionListResponse ## Images Types: ```python -from gradientai.types.gpu_droplets import ( +from do_gradientai.types.gpu_droplets import ( ImageCreateResponse, ImageRetrieveResponse, ImageUpdateResponse, @@ -704,32 +704,32 @@ from gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.images.create(\*\*params) -> ImageCreateResponse -- client.gpu_droplets.images.retrieve(image_id) -> ImageRetrieveResponse -- client.gpu_droplets.images.update(image_id, \*\*params) -> ImageUpdateResponse -- 
client.gpu_droplets.images.list(\*\*params) -> ImageListResponse -- client.gpu_droplets.images.delete(image_id) -> None +- client.gpu_droplets.images.create(\*\*params) -> ImageCreateResponse +- client.gpu_droplets.images.retrieve(image_id) -> ImageRetrieveResponse +- client.gpu_droplets.images.update(image_id, \*\*params) -> ImageUpdateResponse +- client.gpu_droplets.images.list(\*\*params) -> ImageListResponse +- client.gpu_droplets.images.delete(image_id) -> None ### Actions Types: ```python -from gradientai.types.gpu_droplets.images import ActionListResponse +from do_gradientai.types.gpu_droplets.images import ActionListResponse ``` Methods: -- client.gpu_droplets.images.actions.create(image_id, \*\*params) -> Action -- client.gpu_droplets.images.actions.retrieve(action_id, \*, image_id) -> Action -- client.gpu_droplets.images.actions.list(image_id) -> ActionListResponse +- client.gpu_droplets.images.actions.create(image_id, \*\*params) -> Action +- client.gpu_droplets.images.actions.retrieve(action_id, \*, image_id) -> Action +- client.gpu_droplets.images.actions.list(image_id) -> ActionListResponse ## LoadBalancers Types: ```python -from gradientai.types.gpu_droplets import ( +from do_gradientai.types.gpu_droplets import ( Domains, ForwardingRule, GlbSettings, @@ -746,59 +746,59 @@ from gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.load_balancers.create(\*\*params) -> LoadBalancerCreateResponse -- client.gpu_droplets.load_balancers.retrieve(lb_id) -> LoadBalancerRetrieveResponse -- client.gpu_droplets.load_balancers.update(lb_id, \*\*params) -> LoadBalancerUpdateResponse -- client.gpu_droplets.load_balancers.list(\*\*params) -> LoadBalancerListResponse -- client.gpu_droplets.load_balancers.delete(lb_id) -> None -- client.gpu_droplets.load_balancers.delete_cache(lb_id) -> None +- client.gpu_droplets.load_balancers.create(\*\*params) -> LoadBalancerCreateResponse +- client.gpu_droplets.load_balancers.retrieve(lb_id) -> 
LoadBalancerRetrieveResponse +- client.gpu_droplets.load_balancers.update(lb_id, \*\*params) -> LoadBalancerUpdateResponse +- client.gpu_droplets.load_balancers.list(\*\*params) -> LoadBalancerListResponse +- client.gpu_droplets.load_balancers.delete(lb_id) -> None +- client.gpu_droplets.load_balancers.delete_cache(lb_id) -> None ### Droplets Methods: -- client.gpu_droplets.load_balancers.droplets.add(lb_id, \*\*params) -> None -- client.gpu_droplets.load_balancers.droplets.remove(lb_id, \*\*params) -> None +- client.gpu_droplets.load_balancers.droplets.add(lb_id, \*\*params) -> None +- client.gpu_droplets.load_balancers.droplets.remove(lb_id, \*\*params) -> None ### ForwardingRules Methods: -- client.gpu_droplets.load_balancers.forwarding_rules.add(lb_id, \*\*params) -> None -- client.gpu_droplets.load_balancers.forwarding_rules.remove(lb_id, \*\*params) -> None +- client.gpu_droplets.load_balancers.forwarding_rules.add(lb_id, \*\*params) -> None +- client.gpu_droplets.load_balancers.forwarding_rules.remove(lb_id, \*\*params) -> None ## Sizes Types: ```python -from gradientai.types.gpu_droplets import SizeListResponse +from do_gradientai.types.gpu_droplets import SizeListResponse ``` Methods: -- client.gpu_droplets.sizes.list(\*\*params) -> SizeListResponse +- client.gpu_droplets.sizes.list(\*\*params) -> SizeListResponse ## Snapshots Types: ```python -from gradientai.types.gpu_droplets import SnapshotRetrieveResponse, SnapshotListResponse +from do_gradientai.types.gpu_droplets import SnapshotRetrieveResponse, SnapshotListResponse ``` Methods: -- client.gpu_droplets.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse -- client.gpu_droplets.snapshots.list(\*\*params) -> SnapshotListResponse -- client.gpu_droplets.snapshots.delete(snapshot_id) -> None +- client.gpu_droplets.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse +- client.gpu_droplets.snapshots.list(\*\*params) -> SnapshotListResponse +- client.gpu_droplets.snapshots.delete(snapshot_id) 
-> None ## Volumes Types: ```python -from gradientai.types.gpu_droplets import ( +from do_gradientai.types.gpu_droplets import ( VolumeCreateResponse, VolumeRetrieveResponse, VolumeListResponse, @@ -807,18 +807,18 @@ from gradientai.types.gpu_droplets import ( Methods: -- client.gpu_droplets.volumes.create(\*\*params) -> VolumeCreateResponse -- client.gpu_droplets.volumes.retrieve(volume_id) -> VolumeRetrieveResponse -- client.gpu_droplets.volumes.list(\*\*params) -> VolumeListResponse -- client.gpu_droplets.volumes.delete(volume_id) -> None -- client.gpu_droplets.volumes.delete_by_name(\*\*params) -> None +- client.gpu_droplets.volumes.create(\*\*params) -> VolumeCreateResponse +- client.gpu_droplets.volumes.retrieve(volume_id) -> VolumeRetrieveResponse +- client.gpu_droplets.volumes.list(\*\*params) -> VolumeListResponse +- client.gpu_droplets.volumes.delete(volume_id) -> None +- client.gpu_droplets.volumes.delete_by_name(\*\*params) -> None ### Actions Types: ```python -from gradientai.types.gpu_droplets.volumes import ( +from do_gradientai.types.gpu_droplets.volumes import ( VolumeAction, ActionRetrieveResponse, ActionListResponse, @@ -829,17 +829,17 @@ from gradientai.types.gpu_droplets.volumes import ( Methods: -- client.gpu_droplets.volumes.actions.retrieve(action_id, \*, volume_id, \*\*params) -> ActionRetrieveResponse -- client.gpu_droplets.volumes.actions.list(volume_id, \*\*params) -> ActionListResponse -- client.gpu_droplets.volumes.actions.initiate_by_id(volume_id, \*\*params) -> ActionInitiateByIDResponse -- client.gpu_droplets.volumes.actions.initiate_by_name(\*\*params) -> ActionInitiateByNameResponse +- client.gpu_droplets.volumes.actions.retrieve(action_id, \*, volume_id, \*\*params) -> ActionRetrieveResponse +- client.gpu_droplets.volumes.actions.list(volume_id, \*\*params) -> ActionListResponse +- client.gpu_droplets.volumes.actions.initiate_by_id(volume_id, \*\*params) -> ActionInitiateByIDResponse +- 
client.gpu_droplets.volumes.actions.initiate_by_name(\*\*params) -> ActionInitiateByNameResponse ### Snapshots Types: ```python -from gradientai.types.gpu_droplets.volumes import ( +from do_gradientai.types.gpu_droplets.volumes import ( SnapshotCreateResponse, SnapshotRetrieveResponse, SnapshotListResponse, @@ -848,10 +848,10 @@ from gradientai.types.gpu_droplets.volumes import ( Methods: -- client.gpu_droplets.volumes.snapshots.create(volume_id, \*\*params) -> SnapshotCreateResponse -- client.gpu_droplets.volumes.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse -- client.gpu_droplets.volumes.snapshots.list(volume_id, \*\*params) -> SnapshotListResponse -- client.gpu_droplets.volumes.snapshots.delete(snapshot_id) -> None +- client.gpu_droplets.volumes.snapshots.create(volume_id, \*\*params) -> SnapshotCreateResponse +- client.gpu_droplets.volumes.snapshots.retrieve(snapshot_id) -> SnapshotRetrieveResponse +- client.gpu_droplets.volumes.snapshots.list(volume_id, \*\*params) -> SnapshotListResponse +- client.gpu_droplets.volumes.snapshots.delete(snapshot_id) -> None ## Account @@ -860,7 +860,7 @@ Methods: Types: ```python -from gradientai.types.gpu_droplets.account import ( +from do_gradientai.types.gpu_droplets.account import ( KeyCreateResponse, KeyRetrieveResponse, KeyUpdateResponse, @@ -870,8 +870,8 @@ from gradientai.types.gpu_droplets.account import ( Methods: -- client.gpu_droplets.account.keys.create(\*\*params) -> KeyCreateResponse -- client.gpu_droplets.account.keys.retrieve(ssh_key_identifier) -> KeyRetrieveResponse -- client.gpu_droplets.account.keys.update(ssh_key_identifier, \*\*params) -> KeyUpdateResponse -- client.gpu_droplets.account.keys.list(\*\*params) -> KeyListResponse -- client.gpu_droplets.account.keys.delete(ssh_key_identifier) -> None +- client.gpu_droplets.account.keys.create(\*\*params) -> KeyCreateResponse +- client.gpu_droplets.account.keys.retrieve(ssh_key_identifier) -> KeyRetrieveResponse +- 
client.gpu_droplets.account.keys.update(ssh_key_identifier, \*\*params) -> KeyUpdateResponse +- client.gpu_droplets.account.keys.list(\*\*params) -> KeyListResponse +- client.gpu_droplets.account.keys.delete(ssh_key_identifier) -> None diff --git a/mypy.ini b/mypy.ini index 748d8234..82b0c891 100644 --- a/mypy.ini +++ b/mypy.ini @@ -8,7 +8,7 @@ show_error_codes = True # # We also exclude our `tests` as mypy doesn't always infer # types correctly and Pyright will still catch any type errors. -exclude = ^(src/gradientai/_files\.py|_dev/.*\.py|tests/.*)$ +exclude = ^(src/do_gradientai/_files\.py|_dev/.*\.py|tests/.*)$ strict_equality = True implicit_reexport = True diff --git a/pyproject.toml b/pyproject.toml index 2cd02155..f5e5770a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -79,14 +79,14 @@ format = { chain = [ "check:ruff" = "ruff check ." "fix:ruff" = "ruff check --fix ." -"check:importable" = "python -c 'import gradientai'" +"check:importable" = "python -c 'import do_gradientai'" typecheck = { chain = [ "typecheck:pyright", "typecheck:mypy" ]} "typecheck:pyright" = "pyright" -"typecheck:verify-types" = "pyright --verifytypes gradientai --ignoreexternal" +"typecheck:verify-types" = "pyright --verifytypes do_gradientai --ignoreexternal" "typecheck:mypy" = "mypy ." 
[build-system] @@ -99,7 +99,7 @@ include = [ ] [tool.hatch.build.targets.wheel] -packages = ["src/gradientai"] +packages = ["src/do_gradientai"] [tool.hatch.build.targets.sdist] # Basically everything except hidden files/directories (such as .github, .devcontainers, .python-version, etc) @@ -202,7 +202,7 @@ length-sort = true length-sort-straight = true combine-as-imports = true extra-standard-library = ["typing_extensions"] -known-first-party = ["gradientai", "tests"] +known-first-party = ["do_gradientai", "tests"] [tool.ruff.lint.per-file-ignores] "bin/**.py" = ["T201", "T203"] diff --git a/release-please-config.json b/release-please-config.json index 2ff9a58c..a320c1a8 100644 --- a/release-please-config.json +++ b/release-please-config.json @@ -61,6 +61,6 @@ ], "release-type": "python", "extra-files": [ - "src/gradientai/_version.py" + "src/do_gradientai/_version.py" ] } \ No newline at end of file diff --git a/scripts/lint b/scripts/lint index 37b38f6f..e46e909b 100755 --- a/scripts/lint +++ b/scripts/lint @@ -8,4 +8,4 @@ echo "==> Running lints" rye run lint echo "==> Making sure it imports" -rye run python -c 'import gradientai' +rye run python -c 'import do_gradientai' diff --git a/src/gradientai/__init__.py b/src/do_gradientai/__init__.py similarity index 95% rename from src/gradientai/__init__.py rename to src/do_gradientai/__init__.py index 3316fe47..41b943b2 100644 --- a/src/gradientai/__init__.py +++ b/src/do_gradientai/__init__.py @@ -89,12 +89,12 @@ # Update the __module__ attribute for exported symbols so that # error messages point to this module instead of the module # it was originally defined in, e.g. 
-# gradientai._exceptions.NotFoundError -> gradientai.NotFoundError +# do_gradientai._exceptions.NotFoundError -> do_gradientai.NotFoundError __locals = locals() for __name in __all__: if not __name.startswith("__"): try: - __locals[__name].__module__ = "gradientai" + __locals[__name].__module__ = "do_gradientai" except (TypeError, AttributeError): # Some of our exported symbols are builtins which we can't set attributes for. pass diff --git a/src/gradientai/_base_client.py b/src/do_gradientai/_base_client.py similarity index 99% rename from src/gradientai/_base_client.py rename to src/do_gradientai/_base_client.py index 379c27d1..326c662c 100644 --- a/src/gradientai/_base_client.py +++ b/src/do_gradientai/_base_client.py @@ -389,7 +389,7 @@ def __init__( if max_retries is None: # pyright: ignore[reportUnnecessaryComparison] raise TypeError( - "max_retries cannot be None. If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `gradientai.DEFAULT_MAX_RETRIES`" + "max_retries cannot be None. 
If you want to disable retries, pass `0`; if you want unlimited retries, pass `math.inf` or a very high number; if you want the default behavior, pass `do_gradientai.DEFAULT_MAX_RETRIES`" ) def _enforce_trailing_slash(self, url: URL) -> URL: diff --git a/src/gradientai/_client.py b/src/do_gradientai/_client.py similarity index 84% rename from src/gradientai/_client.py rename to src/do_gradientai/_client.py index 53d2a4e4..27287ad9 100644 --- a/src/gradientai/_client.py +++ b/src/do_gradientai/_client.py @@ -34,45 +34,63 @@ if TYPE_CHECKING: from .resources import ( chat, - sizes, agents, - images, models, - account, regions, + inference, + gpu_droplets, + knowledge_bases, + ) + from .resources.regions import RegionsResource, AsyncRegionsResource + from .resources.chat.chat import ChatResource, AsyncChatResource + from .resources.gpu_droplets import ( + GPUDropletsResource, + AsyncGPUDropletsResource, + sizes, + images, + account, volumes, - droplets, firewalls, - inference, snapshots, floating_ips, load_balancers, - knowledge_bases, ) - from .resources.sizes import SizesResource, AsyncSizesResource - from .resources.regions import RegionsResource, AsyncRegionsResource - from .resources.chat.chat import ChatResource, AsyncChatResource - from .resources.snapshots import SnapshotsResource, AsyncSnapshotsResource from .resources.agents.agents import AgentsResource, AsyncAgentsResource - from .resources.images.images import ImagesResource, AsyncImagesResource from .resources.models.models import ModelsResource, AsyncModelsResource - from .resources.account.account import AccountResource, AsyncAccountResource - from .resources.volumes.volumes import VolumesResource, AsyncVolumesResource - from .resources.droplets.droplets import DropletsResource, AsyncDropletsResource - from .resources.firewalls.firewalls import FirewallsResource, AsyncFirewallsResource + from .resources.gpu_droplets.sizes import SizesResource, AsyncSizesResource from .resources.inference.inference 
import InferenceResource, AsyncInferenceResource - from .resources.floating_ips.floating_ips import ( - FloatingIPsResource, - AsyncFloatingIPsResource, + from .resources.gpu_droplets.snapshots import ( + SnapshotsResource, + AsyncSnapshotsResource, ) - from .resources.load_balancers.load_balancers import ( - LoadBalancersResource, - AsyncLoadBalancersResource, + from .resources.gpu_droplets.images.images import ( + ImagesResource, + AsyncImagesResource, + ) + from .resources.gpu_droplets.account.account import ( + AccountResource, + AsyncAccountResource, + ) + from .resources.gpu_droplets.volumes.volumes import ( + VolumesResource, + AsyncVolumesResource, ) from .resources.knowledge_bases.knowledge_bases import ( KnowledgeBasesResource, AsyncKnowledgeBasesResource, ) + from .resources.gpu_droplets.firewalls.firewalls import ( + FirewallsResource, + AsyncFirewallsResource, + ) + from .resources.gpu_droplets.floating_ips.floating_ips import ( + FloatingIPsResource, + AsyncFloatingIPsResource, + ) + from .resources.gpu_droplets.load_balancers.load_balancers import ( + LoadBalancersResource, + AsyncLoadBalancersResource, + ) __all__ = [ "Timeout", @@ -209,56 +227,56 @@ def models(self) -> ModelsResource: return ModelsResource(self) @cached_property - def droplets(self) -> DropletsResource: - from .resources.droplets import DropletsResource + def gpu_droplets(self) -> GPUDropletsResource: + from .resources.gpu_droplets import GPUDropletsResource - return DropletsResource(self) + return GPUDropletsResource(self) @cached_property def firewalls(self) -> FirewallsResource: - from .resources.firewalls import FirewallsResource + from .resources.gpu_droplets.firewalls import FirewallsResource return FirewallsResource(self) @cached_property def floating_ips(self) -> FloatingIPsResource: - from .resources.floating_ips import FloatingIPsResource + from .resources.gpu_droplets.floating_ips import FloatingIPsResource return FloatingIPsResource(self) @cached_property def 
images(self) -> ImagesResource: - from .resources.images import ImagesResource + from .resources.gpu_droplets.images import ImagesResource return ImagesResource(self) @cached_property def load_balancers(self) -> LoadBalancersResource: - from .resources.load_balancers import LoadBalancersResource + from .resources.gpu_droplets.load_balancers import LoadBalancersResource return LoadBalancersResource(self) @cached_property def sizes(self) -> SizesResource: - from .resources.sizes import SizesResource + from .resources.gpu_droplets.sizes import SizesResource return SizesResource(self) @cached_property def snapshots(self) -> SnapshotsResource: - from .resources.snapshots import SnapshotsResource + from .resources.gpu_droplets.snapshots import SnapshotsResource return SnapshotsResource(self) @cached_property def volumes(self) -> VolumesResource: - from .resources.volumes import VolumesResource + from .resources.gpu_droplets.volumes import VolumesResource return VolumesResource(self) @cached_property def account(self) -> AccountResource: - from .resources.account import AccountResource + from .resources.gpu_droplets.account import AccountResource return AccountResource(self) @@ -519,56 +537,56 @@ def models(self) -> AsyncModelsResource: return AsyncModelsResource(self) @cached_property - def droplets(self) -> AsyncDropletsResource: - from .resources.droplets import AsyncDropletsResource + def gpu_droplets(self) -> AsyncGPUDropletsResource: + from .resources.gpu_droplets import AsyncGPUDropletsResource - return AsyncDropletsResource(self) + return AsyncGPUDropletsResource(self) @cached_property def firewalls(self) -> AsyncFirewallsResource: - from .resources.firewalls import AsyncFirewallsResource + from .resources.gpu_droplets.firewalls import AsyncFirewallsResource return AsyncFirewallsResource(self) @cached_property def floating_ips(self) -> AsyncFloatingIPsResource: - from .resources.floating_ips import AsyncFloatingIPsResource + from 
.resources.gpu_droplets.floating_ips import AsyncFloatingIPsResource return AsyncFloatingIPsResource(self) @cached_property def images(self) -> AsyncImagesResource: - from .resources.images import AsyncImagesResource + from .resources.gpu_droplets.images import AsyncImagesResource return AsyncImagesResource(self) @cached_property def load_balancers(self) -> AsyncLoadBalancersResource: - from .resources.load_balancers import AsyncLoadBalancersResource + from .resources.gpu_droplets.load_balancers import AsyncLoadBalancersResource return AsyncLoadBalancersResource(self) @cached_property def sizes(self) -> AsyncSizesResource: - from .resources.sizes import AsyncSizesResource + from .resources.gpu_droplets.sizes import AsyncSizesResource return AsyncSizesResource(self) @cached_property def snapshots(self) -> AsyncSnapshotsResource: - from .resources.snapshots import AsyncSnapshotsResource + from .resources.gpu_droplets.snapshots import AsyncSnapshotsResource return AsyncSnapshotsResource(self) @cached_property def volumes(self) -> AsyncVolumesResource: - from .resources.volumes import AsyncVolumesResource + from .resources.gpu_droplets.volumes import AsyncVolumesResource return AsyncVolumesResource(self) @cached_property def account(self) -> AsyncAccountResource: - from .resources.account import AsyncAccountResource + from .resources.gpu_droplets.account import AsyncAccountResource return AsyncAccountResource(self) @@ -749,56 +767,60 @@ def models(self) -> models.ModelsResourceWithRawResponse: return ModelsResourceWithRawResponse(self._client.models) @cached_property - def droplets(self) -> droplets.DropletsResourceWithRawResponse: - from .resources.droplets import DropletsResourceWithRawResponse + def gpu_droplets(self) -> gpu_droplets.GPUDropletsResourceWithRawResponse: + from .resources.gpu_droplets import GPUDropletsResourceWithRawResponse - return DropletsResourceWithRawResponse(self._client.droplets) + return 
GPUDropletsResourceWithRawResponse(self._client.gpu_droplets) @cached_property def firewalls(self) -> firewalls.FirewallsResourceWithRawResponse: - from .resources.firewalls import FirewallsResourceWithRawResponse + from .resources.gpu_droplets.firewalls import FirewallsResourceWithRawResponse return FirewallsResourceWithRawResponse(self._client.firewalls) @cached_property def floating_ips(self) -> floating_ips.FloatingIPsResourceWithRawResponse: - from .resources.floating_ips import FloatingIPsResourceWithRawResponse + from .resources.gpu_droplets.floating_ips import ( + FloatingIPsResourceWithRawResponse, + ) return FloatingIPsResourceWithRawResponse(self._client.floating_ips) @cached_property def images(self) -> images.ImagesResourceWithRawResponse: - from .resources.images import ImagesResourceWithRawResponse + from .resources.gpu_droplets.images import ImagesResourceWithRawResponse return ImagesResourceWithRawResponse(self._client.images) @cached_property def load_balancers(self) -> load_balancers.LoadBalancersResourceWithRawResponse: - from .resources.load_balancers import LoadBalancersResourceWithRawResponse + from .resources.gpu_droplets.load_balancers import ( + LoadBalancersResourceWithRawResponse, + ) return LoadBalancersResourceWithRawResponse(self._client.load_balancers) @cached_property def sizes(self) -> sizes.SizesResourceWithRawResponse: - from .resources.sizes import SizesResourceWithRawResponse + from .resources.gpu_droplets.sizes import SizesResourceWithRawResponse return SizesResourceWithRawResponse(self._client.sizes) @cached_property def snapshots(self) -> snapshots.SnapshotsResourceWithRawResponse: - from .resources.snapshots import SnapshotsResourceWithRawResponse + from .resources.gpu_droplets.snapshots import SnapshotsResourceWithRawResponse return SnapshotsResourceWithRawResponse(self._client.snapshots) @cached_property def volumes(self) -> volumes.VolumesResourceWithRawResponse: - from .resources.volumes import 
VolumesResourceWithRawResponse + from .resources.gpu_droplets.volumes import VolumesResourceWithRawResponse return VolumesResourceWithRawResponse(self._client.volumes) @cached_property def account(self) -> account.AccountResourceWithRawResponse: - from .resources.account import AccountResourceWithRawResponse + from .resources.gpu_droplets.account import AccountResourceWithRawResponse return AccountResourceWithRawResponse(self._client.account) @@ -850,26 +872,30 @@ def models(self) -> models.AsyncModelsResourceWithRawResponse: return AsyncModelsResourceWithRawResponse(self._client.models) @cached_property - def droplets(self) -> droplets.AsyncDropletsResourceWithRawResponse: - from .resources.droplets import AsyncDropletsResourceWithRawResponse + def gpu_droplets(self) -> gpu_droplets.AsyncGPUDropletsResourceWithRawResponse: + from .resources.gpu_droplets import AsyncGPUDropletsResourceWithRawResponse - return AsyncDropletsResourceWithRawResponse(self._client.droplets) + return AsyncGPUDropletsResourceWithRawResponse(self._client.gpu_droplets) @cached_property def firewalls(self) -> firewalls.AsyncFirewallsResourceWithRawResponse: - from .resources.firewalls import AsyncFirewallsResourceWithRawResponse + from .resources.gpu_droplets.firewalls import ( + AsyncFirewallsResourceWithRawResponse, + ) return AsyncFirewallsResourceWithRawResponse(self._client.firewalls) @cached_property def floating_ips(self) -> floating_ips.AsyncFloatingIPsResourceWithRawResponse: - from .resources.floating_ips import AsyncFloatingIPsResourceWithRawResponse + from .resources.gpu_droplets.floating_ips import ( + AsyncFloatingIPsResourceWithRawResponse, + ) return AsyncFloatingIPsResourceWithRawResponse(self._client.floating_ips) @cached_property def images(self) -> images.AsyncImagesResourceWithRawResponse: - from .resources.images import AsyncImagesResourceWithRawResponse + from .resources.gpu_droplets.images import AsyncImagesResourceWithRawResponse return 
AsyncImagesResourceWithRawResponse(self._client.images) @@ -877,31 +903,35 @@ def images(self) -> images.AsyncImagesResourceWithRawResponse: def load_balancers( self, ) -> load_balancers.AsyncLoadBalancersResourceWithRawResponse: - from .resources.load_balancers import AsyncLoadBalancersResourceWithRawResponse + from .resources.gpu_droplets.load_balancers import ( + AsyncLoadBalancersResourceWithRawResponse, + ) return AsyncLoadBalancersResourceWithRawResponse(self._client.load_balancers) @cached_property def sizes(self) -> sizes.AsyncSizesResourceWithRawResponse: - from .resources.sizes import AsyncSizesResourceWithRawResponse + from .resources.gpu_droplets.sizes import AsyncSizesResourceWithRawResponse return AsyncSizesResourceWithRawResponse(self._client.sizes) @cached_property def snapshots(self) -> snapshots.AsyncSnapshotsResourceWithRawResponse: - from .resources.snapshots import AsyncSnapshotsResourceWithRawResponse + from .resources.gpu_droplets.snapshots import ( + AsyncSnapshotsResourceWithRawResponse, + ) return AsyncSnapshotsResourceWithRawResponse(self._client.snapshots) @cached_property def volumes(self) -> volumes.AsyncVolumesResourceWithRawResponse: - from .resources.volumes import AsyncVolumesResourceWithRawResponse + from .resources.gpu_droplets.volumes import AsyncVolumesResourceWithRawResponse return AsyncVolumesResourceWithRawResponse(self._client.volumes) @cached_property def account(self) -> account.AsyncAccountResourceWithRawResponse: - from .resources.account import AsyncAccountResourceWithRawResponse + from .resources.gpu_droplets.account import AsyncAccountResourceWithRawResponse return AsyncAccountResourceWithRawResponse(self._client.account) @@ -953,26 +983,30 @@ def models(self) -> models.ModelsResourceWithStreamingResponse: return ModelsResourceWithStreamingResponse(self._client.models) @cached_property - def droplets(self) -> droplets.DropletsResourceWithStreamingResponse: - from .resources.droplets import 
DropletsResourceWithStreamingResponse + def droplets(self) -> gpu_droplets.GPUDropletsResourceWithStreamingResponse: + from .resources.gpu_droplets import GPUDropletsResourceWithStreamingResponse - return DropletsResourceWithStreamingResponse(self._client.droplets) + return GPUDropletsResourceWithStreamingResponse(self._client.gpu_droplets) @cached_property def firewalls(self) -> firewalls.FirewallsResourceWithStreamingResponse: - from .resources.firewalls import FirewallsResourceWithStreamingResponse + from .resources.gpu_droplets.firewalls import ( + FirewallsResourceWithStreamingResponse, + ) return FirewallsResourceWithStreamingResponse(self._client.firewalls) @cached_property def floating_ips(self) -> floating_ips.FloatingIPsResourceWithStreamingResponse: - from .resources.floating_ips import FloatingIPsResourceWithStreamingResponse + from .resources.gpu_droplets.floating_ips import ( + FloatingIPsResourceWithStreamingResponse, + ) return FloatingIPsResourceWithStreamingResponse(self._client.floating_ips) @cached_property def images(self) -> images.ImagesResourceWithStreamingResponse: - from .resources.images import ImagesResourceWithStreamingResponse + from .resources.gpu_droplets.images import ImagesResourceWithStreamingResponse return ImagesResourceWithStreamingResponse(self._client.images) @@ -980,31 +1014,35 @@ def images(self) -> images.ImagesResourceWithStreamingResponse: def load_balancers( self, ) -> load_balancers.LoadBalancersResourceWithStreamingResponse: - from .resources.load_balancers import LoadBalancersResourceWithStreamingResponse + from .resources.gpu_droplets.load_balancers import ( + LoadBalancersResourceWithStreamingResponse, + ) return LoadBalancersResourceWithStreamingResponse(self._client.load_balancers) @cached_property def sizes(self) -> sizes.SizesResourceWithStreamingResponse: - from .resources.sizes import SizesResourceWithStreamingResponse + from .resources.gpu_droplets.sizes import SizesResourceWithStreamingResponse return 
SizesResourceWithStreamingResponse(self._client.sizes) @cached_property def snapshots(self) -> snapshots.SnapshotsResourceWithStreamingResponse: - from .resources.snapshots import SnapshotsResourceWithStreamingResponse + from .resources.gpu_droplets.snapshots import ( + SnapshotsResourceWithStreamingResponse, + ) return SnapshotsResourceWithStreamingResponse(self._client.snapshots) @cached_property def volumes(self) -> volumes.VolumesResourceWithStreamingResponse: - from .resources.volumes import VolumesResourceWithStreamingResponse + from .resources.gpu_droplets.volumes import VolumesResourceWithStreamingResponse return VolumesResourceWithStreamingResponse(self._client.volumes) @cached_property def account(self) -> account.AccountResourceWithStreamingResponse: - from .resources.account import AccountResourceWithStreamingResponse + from .resources.gpu_droplets.account import AccountResourceWithStreamingResponse return AccountResourceWithStreamingResponse(self._client.account) @@ -1056,14 +1094,20 @@ def models(self) -> models.AsyncModelsResourceWithStreamingResponse: return AsyncModelsResourceWithStreamingResponse(self._client.models) @cached_property - def droplets(self) -> droplets.AsyncDropletsResourceWithStreamingResponse: - from .resources.droplets import AsyncDropletsResourceWithStreamingResponse + def gpu_droplets( + self, + ) -> gpu_droplets.AsyncGPUDropletsResourceWithStreamingResponse: + from .resources.gpu_droplets import ( + AsyncGPUDropletsResourceWithStreamingResponse, + ) - return AsyncDropletsResourceWithStreamingResponse(self._client.droplets) + return AsyncGPUDropletsResourceWithStreamingResponse(self._client.gpu_droplets) @cached_property def firewalls(self) -> firewalls.AsyncFirewallsResourceWithStreamingResponse: - from .resources.firewalls import AsyncFirewallsResourceWithStreamingResponse + from .resources.gpu_droplets.firewalls import ( + AsyncFirewallsResourceWithStreamingResponse, + ) return 
AsyncFirewallsResourceWithStreamingResponse(self._client.firewalls) @@ -1071,7 +1115,7 @@ def firewalls(self) -> firewalls.AsyncFirewallsResourceWithStreamingResponse: def floating_ips( self, ) -> floating_ips.AsyncFloatingIPsResourceWithStreamingResponse: - from .resources.floating_ips import ( + from .resources.gpu_droplets.floating_ips import ( AsyncFloatingIPsResourceWithStreamingResponse, ) @@ -1079,7 +1123,9 @@ def floating_ips( @cached_property def images(self) -> images.AsyncImagesResourceWithStreamingResponse: - from .resources.images import AsyncImagesResourceWithStreamingResponse + from .resources.gpu_droplets.images import ( + AsyncImagesResourceWithStreamingResponse, + ) return AsyncImagesResourceWithStreamingResponse(self._client.images) @@ -1087,7 +1133,7 @@ def images(self) -> images.AsyncImagesResourceWithStreamingResponse: def load_balancers( self, ) -> load_balancers.AsyncLoadBalancersResourceWithStreamingResponse: - from .resources.load_balancers import ( + from .resources.gpu_droplets.load_balancers import ( AsyncLoadBalancersResourceWithStreamingResponse, ) @@ -1095,25 +1141,33 @@ def load_balancers( @cached_property def sizes(self) -> sizes.AsyncSizesResourceWithStreamingResponse: - from .resources.sizes import AsyncSizesResourceWithStreamingResponse + from .resources.gpu_droplets.sizes import ( + AsyncSizesResourceWithStreamingResponse, + ) return AsyncSizesResourceWithStreamingResponse(self._client.sizes) @cached_property def snapshots(self) -> snapshots.AsyncSnapshotsResourceWithStreamingResponse: - from .resources.snapshots import AsyncSnapshotsResourceWithStreamingResponse + from .resources.gpu_droplets.snapshots import ( + AsyncSnapshotsResourceWithStreamingResponse, + ) return AsyncSnapshotsResourceWithStreamingResponse(self._client.snapshots) @cached_property def volumes(self) -> volumes.AsyncVolumesResourceWithStreamingResponse: - from .resources.volumes import AsyncVolumesResourceWithStreamingResponse + from 
.resources.gpu_droplets.volumes import ( + AsyncVolumesResourceWithStreamingResponse, + ) return AsyncVolumesResourceWithStreamingResponse(self._client.volumes) @cached_property def account(self) -> account.AsyncAccountResourceWithStreamingResponse: - from .resources.account import AsyncAccountResourceWithStreamingResponse + from .resources.gpu_droplets.account import ( + AsyncAccountResourceWithStreamingResponse, + ) return AsyncAccountResourceWithStreamingResponse(self._client.account) diff --git a/src/gradientai/_compat.py b/src/do_gradientai/_compat.py similarity index 100% rename from src/gradientai/_compat.py rename to src/do_gradientai/_compat.py diff --git a/src/gradientai/_constants.py b/src/do_gradientai/_constants.py similarity index 100% rename from src/gradientai/_constants.py rename to src/do_gradientai/_constants.py diff --git a/src/gradientai/_exceptions.py b/src/do_gradientai/_exceptions.py similarity index 100% rename from src/gradientai/_exceptions.py rename to src/do_gradientai/_exceptions.py diff --git a/src/gradientai/_files.py b/src/do_gradientai/_files.py similarity index 100% rename from src/gradientai/_files.py rename to src/do_gradientai/_files.py diff --git a/src/gradientai/_models.py b/src/do_gradientai/_models.py similarity index 100% rename from src/gradientai/_models.py rename to src/do_gradientai/_models.py diff --git a/src/gradientai/_qs.py b/src/do_gradientai/_qs.py similarity index 100% rename from src/gradientai/_qs.py rename to src/do_gradientai/_qs.py diff --git a/src/gradientai/_resource.py b/src/do_gradientai/_resource.py similarity index 100% rename from src/gradientai/_resource.py rename to src/do_gradientai/_resource.py diff --git a/src/gradientai/_response.py b/src/do_gradientai/_response.py similarity index 99% rename from src/gradientai/_response.py rename to src/do_gradientai/_response.py index 2037e4ca..8ca43971 100644 --- a/src/gradientai/_response.py +++ b/src/do_gradientai/_response.py @@ -218,7 +218,7 @@ def 
_parse(self, *, to: type[_T] | None = None) -> R | _T: and issubclass(origin, pydantic.BaseModel) ): raise TypeError( - "Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`" + "Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`" ) if ( @@ -285,7 +285,7 @@ def parse(self, *, to: type[_T] | None = None) -> R | _T: the `to` argument, e.g. ```py - from gradientai import BaseModel + from do_gradientai import BaseModel class MyModel(BaseModel): @@ -387,7 +387,7 @@ async def parse(self, *, to: type[_T] | None = None) -> R | _T: the `to` argument, e.g. ```py - from gradientai import BaseModel + from do_gradientai import BaseModel class MyModel(BaseModel): @@ -558,7 +558,7 @@ async def stream_to_file( class MissingStreamClassError(TypeError): def __init__(self) -> None: super().__init__( - "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `gradientai._streaming` for reference", + "The `stream` argument was set to `True` but the `stream_cls` argument was not given. 
See `do_gradientai._streaming` for reference", ) diff --git a/src/gradientai/_streaming.py b/src/do_gradientai/_streaming.py similarity index 100% rename from src/gradientai/_streaming.py rename to src/do_gradientai/_streaming.py diff --git a/src/gradientai/_types.py b/src/do_gradientai/_types.py similarity index 99% rename from src/gradientai/_types.py rename to src/do_gradientai/_types.py index 1bac876d..c356c700 100644 --- a/src/gradientai/_types.py +++ b/src/do_gradientai/_types.py @@ -81,7 +81,7 @@ # This unfortunately means that you will either have # to import this type and pass it explicitly: # -# from gradientai import NoneType +# from do_gradientai import NoneType # client.get('/foo', cast_to=NoneType) # # or build it yourself: diff --git a/src/gradientai/_utils/__init__.py b/src/do_gradientai/_utils/__init__.py similarity index 100% rename from src/gradientai/_utils/__init__.py rename to src/do_gradientai/_utils/__init__.py diff --git a/src/gradientai/_utils/_logs.py b/src/do_gradientai/_utils/_logs.py similarity index 75% rename from src/gradientai/_utils/_logs.py rename to src/do_gradientai/_utils/_logs.py index 9047e5c8..ac45b1a5 100644 --- a/src/gradientai/_utils/_logs.py +++ b/src/do_gradientai/_utils/_logs.py @@ -1,12 +1,12 @@ import os import logging -logger: logging.Logger = logging.getLogger("gradientai") +logger: logging.Logger = logging.getLogger("do_gradientai") httpx_logger: logging.Logger = logging.getLogger("httpx") def _basic_config() -> None: - # e.g. [2023-10-05 14:12:26 - gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" + # e.g. 
[2023-10-05 14:12:26 - do_gradientai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" logging.basicConfig( format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", diff --git a/src/gradientai/_utils/_proxy.py b/src/do_gradientai/_utils/_proxy.py similarity index 100% rename from src/gradientai/_utils/_proxy.py rename to src/do_gradientai/_utils/_proxy.py diff --git a/src/gradientai/_utils/_reflection.py b/src/do_gradientai/_utils/_reflection.py similarity index 100% rename from src/gradientai/_utils/_reflection.py rename to src/do_gradientai/_utils/_reflection.py diff --git a/src/gradientai/_utils/_resources_proxy.py b/src/do_gradientai/_utils/_resources_proxy.py similarity index 50% rename from src/gradientai/_utils/_resources_proxy.py rename to src/do_gradientai/_utils/_resources_proxy.py index b3bc4931..03763c3b 100644 --- a/src/gradientai/_utils/_resources_proxy.py +++ b/src/do_gradientai/_utils/_resources_proxy.py @@ -7,17 +7,17 @@ class ResourcesProxy(LazyProxy[Any]): - """A proxy for the `gradientai.resources` module. + """A proxy for the `do_gradientai.resources` module. 
- This is used so that we can lazily import `gradientai.resources` only when - needed *and* so that users can just import `gradientai` and reference `gradientai.resources` + This is used so that we can lazily import `do_gradientai.resources` only when + needed *and* so that users can just import `do_gradientai` and reference `do_gradientai.resources` """ @override def __load__(self) -> Any: import importlib - mod = importlib.import_module("gradientai.resources") + mod = importlib.import_module("do_gradientai.resources") return mod diff --git a/src/gradientai/_utils/_streams.py b/src/do_gradientai/_utils/_streams.py similarity index 100% rename from src/gradientai/_utils/_streams.py rename to src/do_gradientai/_utils/_streams.py diff --git a/src/gradientai/_utils/_sync.py b/src/do_gradientai/_utils/_sync.py similarity index 100% rename from src/gradientai/_utils/_sync.py rename to src/do_gradientai/_utils/_sync.py diff --git a/src/gradientai/_utils/_transform.py b/src/do_gradientai/_utils/_transform.py similarity index 100% rename from src/gradientai/_utils/_transform.py rename to src/do_gradientai/_utils/_transform.py diff --git a/src/gradientai/_utils/_typing.py b/src/do_gradientai/_utils/_typing.py similarity index 100% rename from src/gradientai/_utils/_typing.py rename to src/do_gradientai/_utils/_typing.py diff --git a/src/gradientai/_utils/_utils.py b/src/do_gradientai/_utils/_utils.py similarity index 100% rename from src/gradientai/_utils/_utils.py rename to src/do_gradientai/_utils/_utils.py diff --git a/src/gradientai/_version.py b/src/do_gradientai/_version.py similarity index 100% rename from src/gradientai/_version.py rename to src/do_gradientai/_version.py diff --git a/src/gradientai/py.typed b/src/do_gradientai/py.typed similarity index 100% rename from src/gradientai/py.typed rename to src/do_gradientai/py.typed diff --git a/src/gradientai/resources/__init__.py b/src/do_gradientai/resources/__init__.py similarity index 100% rename from 
src/gradientai/resources/__init__.py rename to src/do_gradientai/resources/__init__.py diff --git a/src/gradientai/resources/agents/__init__.py b/src/do_gradientai/resources/agents/__init__.py similarity index 100% rename from src/gradientai/resources/agents/__init__.py rename to src/do_gradientai/resources/agents/__init__.py diff --git a/src/gradientai/resources/agents/agents.py b/src/do_gradientai/resources/agents/agents.py similarity index 100% rename from src/gradientai/resources/agents/agents.py rename to src/do_gradientai/resources/agents/agents.py diff --git a/src/gradientai/resources/agents/api_keys.py b/src/do_gradientai/resources/agents/api_keys.py similarity index 100% rename from src/gradientai/resources/agents/api_keys.py rename to src/do_gradientai/resources/agents/api_keys.py diff --git a/src/gradientai/resources/agents/chat/__init__.py b/src/do_gradientai/resources/agents/chat/__init__.py similarity index 100% rename from src/gradientai/resources/agents/chat/__init__.py rename to src/do_gradientai/resources/agents/chat/__init__.py diff --git a/src/gradientai/resources/agents/chat/chat.py b/src/do_gradientai/resources/agents/chat/chat.py similarity index 100% rename from src/gradientai/resources/agents/chat/chat.py rename to src/do_gradientai/resources/agents/chat/chat.py diff --git a/src/gradientai/resources/agents/chat/completions.py b/src/do_gradientai/resources/agents/chat/completions.py similarity index 100% rename from src/gradientai/resources/agents/chat/completions.py rename to src/do_gradientai/resources/agents/chat/completions.py diff --git a/src/gradientai/resources/agents/evaluation_datasets.py b/src/do_gradientai/resources/agents/evaluation_datasets.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_datasets.py rename to src/do_gradientai/resources/agents/evaluation_datasets.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/__init__.py 
b/src/do_gradientai/resources/agents/evaluation_metrics/__init__.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/__init__.py rename to src/do_gradientai/resources/agents/evaluation_metrics/__init__.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py rename to src/do_gradientai/resources/agents/evaluation_metrics/evaluation_metrics.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/models.py b/src/do_gradientai/resources/agents/evaluation_metrics/models.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/models.py rename to src/do_gradientai/resources/agents/evaluation_metrics/models.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py rename to src/do_gradientai/resources/agents/evaluation_metrics/workspaces/__init__.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/workspaces/agents.py rename to src/do_gradientai/resources/agents/evaluation_metrics/workspaces/agents.py diff --git a/src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py b/src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py rename to src/do_gradientai/resources/agents/evaluation_metrics/workspaces/workspaces.py diff 
--git a/src/gradientai/resources/agents/evaluation_runs.py b/src/do_gradientai/resources/agents/evaluation_runs.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_runs.py rename to src/do_gradientai/resources/agents/evaluation_runs.py diff --git a/src/gradientai/resources/agents/evaluation_test_cases.py b/src/do_gradientai/resources/agents/evaluation_test_cases.py similarity index 100% rename from src/gradientai/resources/agents/evaluation_test_cases.py rename to src/do_gradientai/resources/agents/evaluation_test_cases.py diff --git a/src/gradientai/resources/agents/functions.py b/src/do_gradientai/resources/agents/functions.py similarity index 100% rename from src/gradientai/resources/agents/functions.py rename to src/do_gradientai/resources/agents/functions.py diff --git a/src/gradientai/resources/agents/knowledge_bases.py b/src/do_gradientai/resources/agents/knowledge_bases.py similarity index 100% rename from src/gradientai/resources/agents/knowledge_bases.py rename to src/do_gradientai/resources/agents/knowledge_bases.py diff --git a/src/gradientai/resources/agents/routes.py b/src/do_gradientai/resources/agents/routes.py similarity index 100% rename from src/gradientai/resources/agents/routes.py rename to src/do_gradientai/resources/agents/routes.py diff --git a/src/gradientai/resources/agents/versions.py b/src/do_gradientai/resources/agents/versions.py similarity index 100% rename from src/gradientai/resources/agents/versions.py rename to src/do_gradientai/resources/agents/versions.py diff --git a/src/gradientai/resources/chat/__init__.py b/src/do_gradientai/resources/chat/__init__.py similarity index 100% rename from src/gradientai/resources/chat/__init__.py rename to src/do_gradientai/resources/chat/__init__.py diff --git a/src/gradientai/resources/chat/chat.py b/src/do_gradientai/resources/chat/chat.py similarity index 100% rename from src/gradientai/resources/chat/chat.py rename to src/do_gradientai/resources/chat/chat.py diff 
--git a/src/gradientai/resources/chat/completions.py b/src/do_gradientai/resources/chat/completions.py similarity index 100% rename from src/gradientai/resources/chat/completions.py rename to src/do_gradientai/resources/chat/completions.py diff --git a/src/gradientai/resources/gpu_droplets/__init__.py b/src/do_gradientai/resources/gpu_droplets/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/__init__.py rename to src/do_gradientai/resources/gpu_droplets/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/account/__init__.py b/src/do_gradientai/resources/gpu_droplets/account/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/account/__init__.py rename to src/do_gradientai/resources/gpu_droplets/account/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/account/account.py b/src/do_gradientai/resources/gpu_droplets/account/account.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/account/account.py rename to src/do_gradientai/resources/gpu_droplets/account/account.py diff --git a/src/gradientai/resources/gpu_droplets/account/keys.py b/src/do_gradientai/resources/gpu_droplets/account/keys.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/account/keys.py rename to src/do_gradientai/resources/gpu_droplets/account/keys.py diff --git a/src/gradientai/resources/gpu_droplets/actions.py b/src/do_gradientai/resources/gpu_droplets/actions.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/actions.py rename to src/do_gradientai/resources/gpu_droplets/actions.py diff --git a/src/gradientai/resources/gpu_droplets/autoscale.py b/src/do_gradientai/resources/gpu_droplets/autoscale.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/autoscale.py rename to src/do_gradientai/resources/gpu_droplets/autoscale.py diff --git a/src/gradientai/resources/gpu_droplets/backups.py 
b/src/do_gradientai/resources/gpu_droplets/backups.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/backups.py rename to src/do_gradientai/resources/gpu_droplets/backups.py diff --git a/src/gradientai/resources/gpu_droplets/destroy_with_associated_resources.py b/src/do_gradientai/resources/gpu_droplets/destroy_with_associated_resources.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/destroy_with_associated_resources.py rename to src/do_gradientai/resources/gpu_droplets/destroy_with_associated_resources.py diff --git a/src/gradientai/resources/gpu_droplets/firewalls/__init__.py b/src/do_gradientai/resources/gpu_droplets/firewalls/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/firewalls/__init__.py rename to src/do_gradientai/resources/gpu_droplets/firewalls/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/firewalls/droplets.py b/src/do_gradientai/resources/gpu_droplets/firewalls/droplets.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/firewalls/droplets.py rename to src/do_gradientai/resources/gpu_droplets/firewalls/droplets.py diff --git a/src/gradientai/resources/gpu_droplets/firewalls/firewalls.py b/src/do_gradientai/resources/gpu_droplets/firewalls/firewalls.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/firewalls/firewalls.py rename to src/do_gradientai/resources/gpu_droplets/firewalls/firewalls.py diff --git a/src/gradientai/resources/gpu_droplets/firewalls/rules.py b/src/do_gradientai/resources/gpu_droplets/firewalls/rules.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/firewalls/rules.py rename to src/do_gradientai/resources/gpu_droplets/firewalls/rules.py diff --git a/src/gradientai/resources/gpu_droplets/firewalls/tags.py b/src/do_gradientai/resources/gpu_droplets/firewalls/tags.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/firewalls/tags.py 
rename to src/do_gradientai/resources/gpu_droplets/firewalls/tags.py diff --git a/src/gradientai/resources/gpu_droplets/floating_ips/__init__.py b/src/do_gradientai/resources/gpu_droplets/floating_ips/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/floating_ips/__init__.py rename to src/do_gradientai/resources/gpu_droplets/floating_ips/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/floating_ips/actions.py b/src/do_gradientai/resources/gpu_droplets/floating_ips/actions.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/floating_ips/actions.py rename to src/do_gradientai/resources/gpu_droplets/floating_ips/actions.py diff --git a/src/gradientai/resources/gpu_droplets/floating_ips/floating_ips.py b/src/do_gradientai/resources/gpu_droplets/floating_ips/floating_ips.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/floating_ips/floating_ips.py rename to src/do_gradientai/resources/gpu_droplets/floating_ips/floating_ips.py diff --git a/src/gradientai/resources/gpu_droplets/gpu_droplets.py b/src/do_gradientai/resources/gpu_droplets/gpu_droplets.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/gpu_droplets.py rename to src/do_gradientai/resources/gpu_droplets/gpu_droplets.py diff --git a/src/gradientai/resources/gpu_droplets/images/__init__.py b/src/do_gradientai/resources/gpu_droplets/images/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/images/__init__.py rename to src/do_gradientai/resources/gpu_droplets/images/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/images/actions.py b/src/do_gradientai/resources/gpu_droplets/images/actions.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/images/actions.py rename to src/do_gradientai/resources/gpu_droplets/images/actions.py diff --git a/src/gradientai/resources/gpu_droplets/images/images.py 
b/src/do_gradientai/resources/gpu_droplets/images/images.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/images/images.py rename to src/do_gradientai/resources/gpu_droplets/images/images.py diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/__init__.py b/src/do_gradientai/resources/gpu_droplets/load_balancers/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/load_balancers/__init__.py rename to src/do_gradientai/resources/gpu_droplets/load_balancers/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/droplets.py b/src/do_gradientai/resources/gpu_droplets/load_balancers/droplets.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/load_balancers/droplets.py rename to src/do_gradientai/resources/gpu_droplets/load_balancers/droplets.py diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py b/src/do_gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py rename to src/do_gradientai/resources/gpu_droplets/load_balancers/forwarding_rules.py diff --git a/src/gradientai/resources/gpu_droplets/load_balancers/load_balancers.py b/src/do_gradientai/resources/gpu_droplets/load_balancers/load_balancers.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/load_balancers/load_balancers.py rename to src/do_gradientai/resources/gpu_droplets/load_balancers/load_balancers.py diff --git a/src/gradientai/resources/gpu_droplets/sizes.py b/src/do_gradientai/resources/gpu_droplets/sizes.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/sizes.py rename to src/do_gradientai/resources/gpu_droplets/sizes.py diff --git a/src/gradientai/resources/gpu_droplets/snapshots.py b/src/do_gradientai/resources/gpu_droplets/snapshots.py similarity index 100% rename from 
src/gradientai/resources/gpu_droplets/snapshots.py rename to src/do_gradientai/resources/gpu_droplets/snapshots.py diff --git a/src/gradientai/resources/gpu_droplets/volumes/__init__.py b/src/do_gradientai/resources/gpu_droplets/volumes/__init__.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/volumes/__init__.py rename to src/do_gradientai/resources/gpu_droplets/volumes/__init__.py diff --git a/src/gradientai/resources/gpu_droplets/volumes/actions.py b/src/do_gradientai/resources/gpu_droplets/volumes/actions.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/volumes/actions.py rename to src/do_gradientai/resources/gpu_droplets/volumes/actions.py diff --git a/src/gradientai/resources/gpu_droplets/volumes/snapshots.py b/src/do_gradientai/resources/gpu_droplets/volumes/snapshots.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/volumes/snapshots.py rename to src/do_gradientai/resources/gpu_droplets/volumes/snapshots.py diff --git a/src/gradientai/resources/gpu_droplets/volumes/volumes.py b/src/do_gradientai/resources/gpu_droplets/volumes/volumes.py similarity index 100% rename from src/gradientai/resources/gpu_droplets/volumes/volumes.py rename to src/do_gradientai/resources/gpu_droplets/volumes/volumes.py diff --git a/src/gradientai/resources/inference/__init__.py b/src/do_gradientai/resources/inference/__init__.py similarity index 100% rename from src/gradientai/resources/inference/__init__.py rename to src/do_gradientai/resources/inference/__init__.py diff --git a/src/gradientai/resources/inference/api_keys.py b/src/do_gradientai/resources/inference/api_keys.py similarity index 100% rename from src/gradientai/resources/inference/api_keys.py rename to src/do_gradientai/resources/inference/api_keys.py diff --git a/src/gradientai/resources/inference/inference.py b/src/do_gradientai/resources/inference/inference.py similarity index 100% rename from 
src/gradientai/resources/inference/inference.py rename to src/do_gradientai/resources/inference/inference.py diff --git a/src/gradientai/resources/knowledge_bases/__init__.py b/src/do_gradientai/resources/knowledge_bases/__init__.py similarity index 100% rename from src/gradientai/resources/knowledge_bases/__init__.py rename to src/do_gradientai/resources/knowledge_bases/__init__.py diff --git a/src/gradientai/resources/knowledge_bases/data_sources.py b/src/do_gradientai/resources/knowledge_bases/data_sources.py similarity index 100% rename from src/gradientai/resources/knowledge_bases/data_sources.py rename to src/do_gradientai/resources/knowledge_bases/data_sources.py diff --git a/src/gradientai/resources/knowledge_bases/indexing_jobs.py b/src/do_gradientai/resources/knowledge_bases/indexing_jobs.py similarity index 100% rename from src/gradientai/resources/knowledge_bases/indexing_jobs.py rename to src/do_gradientai/resources/knowledge_bases/indexing_jobs.py diff --git a/src/gradientai/resources/knowledge_bases/knowledge_bases.py b/src/do_gradientai/resources/knowledge_bases/knowledge_bases.py similarity index 100% rename from src/gradientai/resources/knowledge_bases/knowledge_bases.py rename to src/do_gradientai/resources/knowledge_bases/knowledge_bases.py diff --git a/src/gradientai/resources/models/__init__.py b/src/do_gradientai/resources/models/__init__.py similarity index 100% rename from src/gradientai/resources/models/__init__.py rename to src/do_gradientai/resources/models/__init__.py diff --git a/src/gradientai/resources/models/models.py b/src/do_gradientai/resources/models/models.py similarity index 100% rename from src/gradientai/resources/models/models.py rename to src/do_gradientai/resources/models/models.py diff --git a/src/gradientai/resources/models/providers/__init__.py b/src/do_gradientai/resources/models/providers/__init__.py similarity index 100% rename from src/gradientai/resources/models/providers/__init__.py rename to 
src/do_gradientai/resources/models/providers/__init__.py diff --git a/src/gradientai/resources/models/providers/anthropic.py b/src/do_gradientai/resources/models/providers/anthropic.py similarity index 100% rename from src/gradientai/resources/models/providers/anthropic.py rename to src/do_gradientai/resources/models/providers/anthropic.py diff --git a/src/gradientai/resources/models/providers/openai.py b/src/do_gradientai/resources/models/providers/openai.py similarity index 100% rename from src/gradientai/resources/models/providers/openai.py rename to src/do_gradientai/resources/models/providers/openai.py diff --git a/src/gradientai/resources/models/providers/providers.py b/src/do_gradientai/resources/models/providers/providers.py similarity index 100% rename from src/gradientai/resources/models/providers/providers.py rename to src/do_gradientai/resources/models/providers/providers.py diff --git a/src/gradientai/resources/regions.py b/src/do_gradientai/resources/regions.py similarity index 100% rename from src/gradientai/resources/regions.py rename to src/do_gradientai/resources/regions.py diff --git a/src/gradientai/types/__init__.py b/src/do_gradientai/types/__init__.py similarity index 100% rename from src/gradientai/types/__init__.py rename to src/do_gradientai/types/__init__.py diff --git a/src/gradientai/types/agent_create_params.py b/src/do_gradientai/types/agent_create_params.py similarity index 100% rename from src/gradientai/types/agent_create_params.py rename to src/do_gradientai/types/agent_create_params.py diff --git a/src/gradientai/types/agent_create_response.py b/src/do_gradientai/types/agent_create_response.py similarity index 100% rename from src/gradientai/types/agent_create_response.py rename to src/do_gradientai/types/agent_create_response.py diff --git a/src/gradientai/types/agent_delete_response.py b/src/do_gradientai/types/agent_delete_response.py similarity index 100% rename from src/gradientai/types/agent_delete_response.py rename to 
src/do_gradientai/types/agent_delete_response.py diff --git a/src/gradientai/types/agent_list_params.py b/src/do_gradientai/types/agent_list_params.py similarity index 100% rename from src/gradientai/types/agent_list_params.py rename to src/do_gradientai/types/agent_list_params.py diff --git a/src/gradientai/types/agent_list_response.py b/src/do_gradientai/types/agent_list_response.py similarity index 100% rename from src/gradientai/types/agent_list_response.py rename to src/do_gradientai/types/agent_list_response.py diff --git a/src/gradientai/types/agent_retrieve_response.py b/src/do_gradientai/types/agent_retrieve_response.py similarity index 100% rename from src/gradientai/types/agent_retrieve_response.py rename to src/do_gradientai/types/agent_retrieve_response.py diff --git a/src/gradientai/types/agent_update_params.py b/src/do_gradientai/types/agent_update_params.py similarity index 100% rename from src/gradientai/types/agent_update_params.py rename to src/do_gradientai/types/agent_update_params.py diff --git a/src/gradientai/types/agent_update_response.py b/src/do_gradientai/types/agent_update_response.py similarity index 100% rename from src/gradientai/types/agent_update_response.py rename to src/do_gradientai/types/agent_update_response.py diff --git a/src/gradientai/types/agent_update_status_params.py b/src/do_gradientai/types/agent_update_status_params.py similarity index 100% rename from src/gradientai/types/agent_update_status_params.py rename to src/do_gradientai/types/agent_update_status_params.py diff --git a/src/gradientai/types/agent_update_status_response.py b/src/do_gradientai/types/agent_update_status_response.py similarity index 100% rename from src/gradientai/types/agent_update_status_response.py rename to src/do_gradientai/types/agent_update_status_response.py diff --git a/src/gradientai/types/agents/__init__.py b/src/do_gradientai/types/agents/__init__.py similarity index 100% rename from src/gradientai/types/agents/__init__.py rename to 
src/do_gradientai/types/agents/__init__.py diff --git a/src/gradientai/types/agents/api_evaluation_metric.py b/src/do_gradientai/types/agents/api_evaluation_metric.py similarity index 100% rename from src/gradientai/types/agents/api_evaluation_metric.py rename to src/do_gradientai/types/agents/api_evaluation_metric.py diff --git a/src/gradientai/types/agents/api_evaluation_metric_result.py b/src/do_gradientai/types/agents/api_evaluation_metric_result.py similarity index 100% rename from src/gradientai/types/agents/api_evaluation_metric_result.py rename to src/do_gradientai/types/agents/api_evaluation_metric_result.py diff --git a/src/gradientai/types/agents/api_evaluation_prompt.py b/src/do_gradientai/types/agents/api_evaluation_prompt.py similarity index 100% rename from src/gradientai/types/agents/api_evaluation_prompt.py rename to src/do_gradientai/types/agents/api_evaluation_prompt.py diff --git a/src/gradientai/types/agents/api_evaluation_run.py b/src/do_gradientai/types/agents/api_evaluation_run.py similarity index 100% rename from src/gradientai/types/agents/api_evaluation_run.py rename to src/do_gradientai/types/agents/api_evaluation_run.py diff --git a/src/gradientai/types/agents/api_evaluation_test_case.py b/src/do_gradientai/types/agents/api_evaluation_test_case.py similarity index 100% rename from src/gradientai/types/agents/api_evaluation_test_case.py rename to src/do_gradientai/types/agents/api_evaluation_test_case.py diff --git a/src/gradientai/types/agents/api_key_create_params.py b/src/do_gradientai/types/agents/api_key_create_params.py similarity index 100% rename from src/gradientai/types/agents/api_key_create_params.py rename to src/do_gradientai/types/agents/api_key_create_params.py diff --git a/src/gradientai/types/agents/api_key_create_response.py b/src/do_gradientai/types/agents/api_key_create_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_create_response.py rename to 
src/do_gradientai/types/agents/api_key_create_response.py diff --git a/src/gradientai/types/agents/api_key_delete_response.py b/src/do_gradientai/types/agents/api_key_delete_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_delete_response.py rename to src/do_gradientai/types/agents/api_key_delete_response.py diff --git a/src/gradientai/types/agents/api_key_list_params.py b/src/do_gradientai/types/agents/api_key_list_params.py similarity index 100% rename from src/gradientai/types/agents/api_key_list_params.py rename to src/do_gradientai/types/agents/api_key_list_params.py diff --git a/src/gradientai/types/agents/api_key_list_response.py b/src/do_gradientai/types/agents/api_key_list_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_list_response.py rename to src/do_gradientai/types/agents/api_key_list_response.py diff --git a/src/gradientai/types/agents/api_key_regenerate_response.py b/src/do_gradientai/types/agents/api_key_regenerate_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_regenerate_response.py rename to src/do_gradientai/types/agents/api_key_regenerate_response.py diff --git a/src/gradientai/types/agents/api_key_update_params.py b/src/do_gradientai/types/agents/api_key_update_params.py similarity index 100% rename from src/gradientai/types/agents/api_key_update_params.py rename to src/do_gradientai/types/agents/api_key_update_params.py diff --git a/src/gradientai/types/agents/api_key_update_response.py b/src/do_gradientai/types/agents/api_key_update_response.py similarity index 100% rename from src/gradientai/types/agents/api_key_update_response.py rename to src/do_gradientai/types/agents/api_key_update_response.py diff --git a/src/gradientai/types/agents/api_link_knowledge_base_output.py b/src/do_gradientai/types/agents/api_link_knowledge_base_output.py similarity index 100% rename from src/gradientai/types/agents/api_link_knowledge_base_output.py rename to 
src/do_gradientai/types/agents/api_link_knowledge_base_output.py diff --git a/src/gradientai/types/agents/api_star_metric.py b/src/do_gradientai/types/agents/api_star_metric.py similarity index 100% rename from src/gradientai/types/agents/api_star_metric.py rename to src/do_gradientai/types/agents/api_star_metric.py diff --git a/src/gradientai/types/agents/api_star_metric_param.py b/src/do_gradientai/types/agents/api_star_metric_param.py similarity index 100% rename from src/gradientai/types/agents/api_star_metric_param.py rename to src/do_gradientai/types/agents/api_star_metric_param.py diff --git a/src/gradientai/types/agents/chat/__init__.py b/src/do_gradientai/types/agents/chat/__init__.py similarity index 100% rename from src/gradientai/types/agents/chat/__init__.py rename to src/do_gradientai/types/agents/chat/__init__.py diff --git a/src/gradientai/types/agents/chat/completion_create_params.py b/src/do_gradientai/types/agents/chat/completion_create_params.py similarity index 100% rename from src/gradientai/types/agents/chat/completion_create_params.py rename to src/do_gradientai/types/agents/chat/completion_create_params.py diff --git a/src/gradientai/types/agents/chat/completion_create_response.py b/src/do_gradientai/types/agents/chat/completion_create_response.py similarity index 100% rename from src/gradientai/types/agents/chat/completion_create_response.py rename to src/do_gradientai/types/agents/chat/completion_create_response.py diff --git a/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py b/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py rename to src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_params.py diff --git 
a/src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py b/src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py rename to src/do_gradientai/types/agents/evaluation_dataset_create_file_upload_presigned_urls_response.py diff --git a/src/gradientai/types/agents/evaluation_dataset_create_params.py b/src/do_gradientai/types/agents/evaluation_dataset_create_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_dataset_create_params.py rename to src/do_gradientai/types/agents/evaluation_dataset_create_params.py diff --git a/src/gradientai/types/agents/evaluation_dataset_create_response.py b/src/do_gradientai/types/agents/evaluation_dataset_create_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_dataset_create_response.py rename to src/do_gradientai/types/agents/evaluation_dataset_create_response.py diff --git a/src/gradientai/types/agents/evaluation_metric_list_regions_params.py b/src/do_gradientai/types/agents/evaluation_metric_list_regions_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metric_list_regions_params.py rename to src/do_gradientai/types/agents/evaluation_metric_list_regions_params.py diff --git a/src/gradientai/types/agents/evaluation_metric_list_regions_response.py b/src/do_gradientai/types/agents/evaluation_metric_list_regions_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metric_list_regions_response.py rename to src/do_gradientai/types/agents/evaluation_metric_list_regions_response.py diff --git a/src/gradientai/types/agents/evaluation_metric_list_response.py b/src/do_gradientai/types/agents/evaluation_metric_list_response.py similarity index 100% rename from 
src/gradientai/types/agents/evaluation_metric_list_response.py rename to src/do_gradientai/types/agents/evaluation_metric_list_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/__init__.py b/src/do_gradientai/types/agents/evaluation_metrics/__init__.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/__init__.py rename to src/do_gradientai/types/agents/evaluation_metrics/__init__.py diff --git a/src/gradientai/types/agents/evaluation_metrics/model_list_params.py b/src/do_gradientai/types/agents/evaluation_metrics/model_list_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/model_list_params.py rename to src/do_gradientai/types/agents/evaluation_metrics/model_list_params.py diff --git a/src/gradientai/types/agents/evaluation_metrics/model_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/model_list_response.py rename to src/do_gradientai/types/agents/evaluation_metrics/model_list_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_create_params.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_create_params.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_create_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_create_response.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_create_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py 
b/src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_delete_response.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_delete_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_list_evaluation_test_cases_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_list_response.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_list_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_retrieve_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_update_params.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_update_params.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspace_update_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py 
similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspace_update_response.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspace_update_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/__init__.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspaces/__init__.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspaces/__init__.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_params.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_list_response.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py rename to src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_params.py diff --git a/src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py b/src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py rename to 
src/do_gradientai/types/agents/evaluation_metrics/workspaces/agent_move_response.py diff --git a/src/gradientai/types/agents/evaluation_run_create_params.py b/src/do_gradientai/types/agents/evaluation_run_create_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_run_create_params.py rename to src/do_gradientai/types/agents/evaluation_run_create_params.py diff --git a/src/gradientai/types/agents/evaluation_run_create_response.py b/src/do_gradientai/types/agents/evaluation_run_create_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_run_create_response.py rename to src/do_gradientai/types/agents/evaluation_run_create_response.py diff --git a/src/gradientai/types/agents/evaluation_run_list_results_params.py b/src/do_gradientai/types/agents/evaluation_run_list_results_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_run_list_results_params.py rename to src/do_gradientai/types/agents/evaluation_run_list_results_params.py diff --git a/src/gradientai/types/agents/evaluation_run_list_results_response.py b/src/do_gradientai/types/agents/evaluation_run_list_results_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_run_list_results_response.py rename to src/do_gradientai/types/agents/evaluation_run_list_results_response.py diff --git a/src/gradientai/types/agents/evaluation_run_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_run_retrieve_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_run_retrieve_response.py rename to src/do_gradientai/types/agents/evaluation_run_retrieve_response.py diff --git a/src/gradientai/types/agents/evaluation_run_retrieve_results_response.py b/src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_run_retrieve_results_response.py rename to 
src/do_gradientai/types/agents/evaluation_run_retrieve_results_response.py diff --git a/src/gradientai/types/agents/evaluation_test_case_create_params.py b/src/do_gradientai/types/agents/evaluation_test_case_create_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_create_params.py rename to src/do_gradientai/types/agents/evaluation_test_case_create_params.py diff --git a/src/gradientai/types/agents/evaluation_test_case_create_response.py b/src/do_gradientai/types/agents/evaluation_test_case_create_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_create_response.py rename to src/do_gradientai/types/agents/evaluation_test_case_create_response.py diff --git a/src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py b/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py rename to src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_params.py diff --git a/src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py b/src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py rename to src/do_gradientai/types/agents/evaluation_test_case_list_evaluation_runs_response.py diff --git a/src/gradientai/types/agents/evaluation_test_case_list_response.py b/src/do_gradientai/types/agents/evaluation_test_case_list_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_list_response.py rename to src/do_gradientai/types/agents/evaluation_test_case_list_response.py diff --git a/src/gradientai/types/agents/evaluation_test_case_retrieve_params.py 
b/src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_retrieve_params.py rename to src/do_gradientai/types/agents/evaluation_test_case_retrieve_params.py diff --git a/src/gradientai/types/agents/evaluation_test_case_retrieve_response.py b/src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_retrieve_response.py rename to src/do_gradientai/types/agents/evaluation_test_case_retrieve_response.py diff --git a/src/gradientai/types/agents/evaluation_test_case_update_params.py b/src/do_gradientai/types/agents/evaluation_test_case_update_params.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_update_params.py rename to src/do_gradientai/types/agents/evaluation_test_case_update_params.py diff --git a/src/gradientai/types/agents/evaluation_test_case_update_response.py b/src/do_gradientai/types/agents/evaluation_test_case_update_response.py similarity index 100% rename from src/gradientai/types/agents/evaluation_test_case_update_response.py rename to src/do_gradientai/types/agents/evaluation_test_case_update_response.py diff --git a/src/gradientai/types/agents/function_create_params.py b/src/do_gradientai/types/agents/function_create_params.py similarity index 100% rename from src/gradientai/types/agents/function_create_params.py rename to src/do_gradientai/types/agents/function_create_params.py diff --git a/src/gradientai/types/agents/function_create_response.py b/src/do_gradientai/types/agents/function_create_response.py similarity index 100% rename from src/gradientai/types/agents/function_create_response.py rename to src/do_gradientai/types/agents/function_create_response.py diff --git a/src/gradientai/types/agents/function_delete_response.py b/src/do_gradientai/types/agents/function_delete_response.py similarity index 100% rename 
from src/gradientai/types/agents/function_delete_response.py rename to src/do_gradientai/types/agents/function_delete_response.py diff --git a/src/gradientai/types/agents/function_update_params.py b/src/do_gradientai/types/agents/function_update_params.py similarity index 100% rename from src/gradientai/types/agents/function_update_params.py rename to src/do_gradientai/types/agents/function_update_params.py diff --git a/src/gradientai/types/agents/function_update_response.py b/src/do_gradientai/types/agents/function_update_response.py similarity index 100% rename from src/gradientai/types/agents/function_update_response.py rename to src/do_gradientai/types/agents/function_update_response.py diff --git a/src/gradientai/types/agents/knowledge_base_detach_response.py b/src/do_gradientai/types/agents/knowledge_base_detach_response.py similarity index 100% rename from src/gradientai/types/agents/knowledge_base_detach_response.py rename to src/do_gradientai/types/agents/knowledge_base_detach_response.py diff --git a/src/gradientai/types/agents/route_add_params.py b/src/do_gradientai/types/agents/route_add_params.py similarity index 100% rename from src/gradientai/types/agents/route_add_params.py rename to src/do_gradientai/types/agents/route_add_params.py diff --git a/src/gradientai/types/agents/route_add_response.py b/src/do_gradientai/types/agents/route_add_response.py similarity index 100% rename from src/gradientai/types/agents/route_add_response.py rename to src/do_gradientai/types/agents/route_add_response.py diff --git a/src/gradientai/types/agents/route_delete_response.py b/src/do_gradientai/types/agents/route_delete_response.py similarity index 100% rename from src/gradientai/types/agents/route_delete_response.py rename to src/do_gradientai/types/agents/route_delete_response.py diff --git a/src/gradientai/types/agents/route_update_params.py b/src/do_gradientai/types/agents/route_update_params.py similarity index 100% rename from 
src/gradientai/types/agents/route_update_params.py rename to src/do_gradientai/types/agents/route_update_params.py diff --git a/src/gradientai/types/agents/route_update_response.py b/src/do_gradientai/types/agents/route_update_response.py similarity index 100% rename from src/gradientai/types/agents/route_update_response.py rename to src/do_gradientai/types/agents/route_update_response.py diff --git a/src/gradientai/types/agents/route_view_response.py b/src/do_gradientai/types/agents/route_view_response.py similarity index 100% rename from src/gradientai/types/agents/route_view_response.py rename to src/do_gradientai/types/agents/route_view_response.py diff --git a/src/gradientai/types/agents/version_list_params.py b/src/do_gradientai/types/agents/version_list_params.py similarity index 100% rename from src/gradientai/types/agents/version_list_params.py rename to src/do_gradientai/types/agents/version_list_params.py diff --git a/src/gradientai/types/agents/version_list_response.py b/src/do_gradientai/types/agents/version_list_response.py similarity index 100% rename from src/gradientai/types/agents/version_list_response.py rename to src/do_gradientai/types/agents/version_list_response.py diff --git a/src/gradientai/types/agents/version_update_params.py b/src/do_gradientai/types/agents/version_update_params.py similarity index 100% rename from src/gradientai/types/agents/version_update_params.py rename to src/do_gradientai/types/agents/version_update_params.py diff --git a/src/gradientai/types/agents/version_update_response.py b/src/do_gradientai/types/agents/version_update_response.py similarity index 100% rename from src/gradientai/types/agents/version_update_response.py rename to src/do_gradientai/types/agents/version_update_response.py diff --git a/src/gradientai/types/api_agent.py b/src/do_gradientai/types/api_agent.py similarity index 100% rename from src/gradientai/types/api_agent.py rename to src/do_gradientai/types/api_agent.py diff --git 
a/src/gradientai/types/api_agent_api_key_info.py b/src/do_gradientai/types/api_agent_api_key_info.py similarity index 100% rename from src/gradientai/types/api_agent_api_key_info.py rename to src/do_gradientai/types/api_agent_api_key_info.py diff --git a/src/gradientai/types/api_agent_model.py b/src/do_gradientai/types/api_agent_model.py similarity index 100% rename from src/gradientai/types/api_agent_model.py rename to src/do_gradientai/types/api_agent_model.py diff --git a/src/gradientai/types/api_agreement.py b/src/do_gradientai/types/api_agreement.py similarity index 100% rename from src/gradientai/types/api_agreement.py rename to src/do_gradientai/types/api_agreement.py diff --git a/src/gradientai/types/api_anthropic_api_key_info.py b/src/do_gradientai/types/api_anthropic_api_key_info.py similarity index 100% rename from src/gradientai/types/api_anthropic_api_key_info.py rename to src/do_gradientai/types/api_anthropic_api_key_info.py diff --git a/src/gradientai/types/api_deployment_visibility.py b/src/do_gradientai/types/api_deployment_visibility.py similarity index 100% rename from src/gradientai/types/api_deployment_visibility.py rename to src/do_gradientai/types/api_deployment_visibility.py diff --git a/src/gradientai/types/api_knowledge_base.py b/src/do_gradientai/types/api_knowledge_base.py similarity index 100% rename from src/gradientai/types/api_knowledge_base.py rename to src/do_gradientai/types/api_knowledge_base.py diff --git a/src/gradientai/types/api_model.py b/src/do_gradientai/types/api_model.py similarity index 100% rename from src/gradientai/types/api_model.py rename to src/do_gradientai/types/api_model.py diff --git a/src/gradientai/types/api_model_version.py b/src/do_gradientai/types/api_model_version.py similarity index 100% rename from src/gradientai/types/api_model_version.py rename to src/do_gradientai/types/api_model_version.py diff --git a/src/gradientai/types/api_openai_api_key_info.py 
b/src/do_gradientai/types/api_openai_api_key_info.py similarity index 100% rename from src/gradientai/types/api_openai_api_key_info.py rename to src/do_gradientai/types/api_openai_api_key_info.py diff --git a/src/gradientai/types/api_retrieval_method.py b/src/do_gradientai/types/api_retrieval_method.py similarity index 100% rename from src/gradientai/types/api_retrieval_method.py rename to src/do_gradientai/types/api_retrieval_method.py diff --git a/src/gradientai/types/api_workspace.py b/src/do_gradientai/types/api_workspace.py similarity index 100% rename from src/gradientai/types/api_workspace.py rename to src/do_gradientai/types/api_workspace.py diff --git a/src/gradientai/types/chat/__init__.py b/src/do_gradientai/types/chat/__init__.py similarity index 100% rename from src/gradientai/types/chat/__init__.py rename to src/do_gradientai/types/chat/__init__.py diff --git a/src/gradientai/types/chat/completion_create_params.py b/src/do_gradientai/types/chat/completion_create_params.py similarity index 100% rename from src/gradientai/types/chat/completion_create_params.py rename to src/do_gradientai/types/chat/completion_create_params.py diff --git a/src/gradientai/types/chat/completion_create_response.py b/src/do_gradientai/types/chat/completion_create_response.py similarity index 100% rename from src/gradientai/types/chat/completion_create_response.py rename to src/do_gradientai/types/chat/completion_create_response.py diff --git a/src/gradientai/types/droplet_backup_policy.py b/src/do_gradientai/types/droplet_backup_policy.py similarity index 100% rename from src/gradientai/types/droplet_backup_policy.py rename to src/do_gradientai/types/droplet_backup_policy.py diff --git a/src/gradientai/types/droplet_backup_policy_param.py b/src/do_gradientai/types/droplet_backup_policy_param.py similarity index 100% rename from src/gradientai/types/droplet_backup_policy_param.py rename to src/do_gradientai/types/droplet_backup_policy_param.py diff --git 
a/src/gradientai/types/gpu_droplet_create_params.py b/src/do_gradientai/types/gpu_droplet_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplet_create_params.py rename to src/do_gradientai/types/gpu_droplet_create_params.py diff --git a/src/gradientai/types/gpu_droplet_create_response.py b/src/do_gradientai/types/gpu_droplet_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplet_create_response.py rename to src/do_gradientai/types/gpu_droplet_create_response.py diff --git a/src/gradientai/types/gpu_droplet_delete_by_tag_params.py b/src/do_gradientai/types/gpu_droplet_delete_by_tag_params.py similarity index 100% rename from src/gradientai/types/gpu_droplet_delete_by_tag_params.py rename to src/do_gradientai/types/gpu_droplet_delete_by_tag_params.py diff --git a/src/gradientai/types/gpu_droplet_list_firewalls_params.py b/src/do_gradientai/types/gpu_droplet_list_firewalls_params.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_firewalls_params.py rename to src/do_gradientai/types/gpu_droplet_list_firewalls_params.py diff --git a/src/gradientai/types/gpu_droplet_list_firewalls_response.py b/src/do_gradientai/types/gpu_droplet_list_firewalls_response.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_firewalls_response.py rename to src/do_gradientai/types/gpu_droplet_list_firewalls_response.py diff --git a/src/gradientai/types/gpu_droplet_list_kernels_params.py b/src/do_gradientai/types/gpu_droplet_list_kernels_params.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_kernels_params.py rename to src/do_gradientai/types/gpu_droplet_list_kernels_params.py diff --git a/src/gradientai/types/gpu_droplet_list_kernels_response.py b/src/do_gradientai/types/gpu_droplet_list_kernels_response.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_kernels_response.py rename to 
src/do_gradientai/types/gpu_droplet_list_kernels_response.py diff --git a/src/gradientai/types/gpu_droplet_list_neighbors_response.py b/src/do_gradientai/types/gpu_droplet_list_neighbors_response.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_neighbors_response.py rename to src/do_gradientai/types/gpu_droplet_list_neighbors_response.py diff --git a/src/gradientai/types/gpu_droplet_list_params.py b/src/do_gradientai/types/gpu_droplet_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_params.py rename to src/do_gradientai/types/gpu_droplet_list_params.py diff --git a/src/gradientai/types/gpu_droplet_list_response.py b/src/do_gradientai/types/gpu_droplet_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_response.py rename to src/do_gradientai/types/gpu_droplet_list_response.py diff --git a/src/gradientai/types/gpu_droplet_list_snapshots_params.py b/src/do_gradientai/types/gpu_droplet_list_snapshots_params.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_snapshots_params.py rename to src/do_gradientai/types/gpu_droplet_list_snapshots_params.py diff --git a/src/gradientai/types/gpu_droplet_list_snapshots_response.py b/src/do_gradientai/types/gpu_droplet_list_snapshots_response.py similarity index 100% rename from src/gradientai/types/gpu_droplet_list_snapshots_response.py rename to src/do_gradientai/types/gpu_droplet_list_snapshots_response.py diff --git a/src/gradientai/types/gpu_droplet_retrieve_response.py b/src/do_gradientai/types/gpu_droplet_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplet_retrieve_response.py rename to src/do_gradientai/types/gpu_droplet_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/__init__.py b/src/do_gradientai/types/gpu_droplets/__init__.py similarity index 100% rename from src/gradientai/types/gpu_droplets/__init__.py rename to 
src/do_gradientai/types/gpu_droplets/__init__.py diff --git a/src/gradientai/types/gpu_droplets/account/__init__.py b/src/do_gradientai/types/gpu_droplets/account/__init__.py similarity index 100% rename from src/gradientai/types/gpu_droplets/account/__init__.py rename to src/do_gradientai/types/gpu_droplets/account/__init__.py diff --git a/src/gradientai/types/gpu_droplets/account/key_create_params.py b/src/do_gradientai/types/gpu_droplets/account/key_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/account/key_create_params.py rename to src/do_gradientai/types/gpu_droplets/account/key_create_params.py diff --git a/src/gradientai/types/gpu_droplets/account/key_create_response.py b/src/do_gradientai/types/gpu_droplets/account/key_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/account/key_create_response.py rename to src/do_gradientai/types/gpu_droplets/account/key_create_response.py diff --git a/src/gradientai/types/gpu_droplets/account/key_list_params.py b/src/do_gradientai/types/gpu_droplets/account/key_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/account/key_list_params.py rename to src/do_gradientai/types/gpu_droplets/account/key_list_params.py diff --git a/src/gradientai/types/gpu_droplets/account/key_list_response.py b/src/do_gradientai/types/gpu_droplets/account/key_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/account/key_list_response.py rename to src/do_gradientai/types/gpu_droplets/account/key_list_response.py diff --git a/src/gradientai/types/gpu_droplets/account/key_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/account/key_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/account/key_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/account/key_retrieve_response.py diff --git 
a/src/gradientai/types/gpu_droplets/account/key_update_params.py b/src/do_gradientai/types/gpu_droplets/account/key_update_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/account/key_update_params.py rename to src/do_gradientai/types/gpu_droplets/account/key_update_params.py diff --git a/src/gradientai/types/gpu_droplets/account/key_update_response.py b/src/do_gradientai/types/gpu_droplets/account/key_update_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/account/key_update_response.py rename to src/do_gradientai/types/gpu_droplets/account/key_update_response.py diff --git a/src/gradientai/types/gpu_droplets/action_bulk_initiate_params.py b/src/do_gradientai/types/gpu_droplets/action_bulk_initiate_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/action_bulk_initiate_params.py rename to src/do_gradientai/types/gpu_droplets/action_bulk_initiate_params.py diff --git a/src/gradientai/types/gpu_droplets/action_bulk_initiate_response.py b/src/do_gradientai/types/gpu_droplets/action_bulk_initiate_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/action_bulk_initiate_response.py rename to src/do_gradientai/types/gpu_droplets/action_bulk_initiate_response.py diff --git a/src/gradientai/types/gpu_droplets/action_initiate_params.py b/src/do_gradientai/types/gpu_droplets/action_initiate_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/action_initiate_params.py rename to src/do_gradientai/types/gpu_droplets/action_initiate_params.py diff --git a/src/gradientai/types/gpu_droplets/action_initiate_response.py b/src/do_gradientai/types/gpu_droplets/action_initiate_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/action_initiate_response.py rename to src/do_gradientai/types/gpu_droplets/action_initiate_response.py diff --git a/src/gradientai/types/gpu_droplets/action_list_params.py 
b/src/do_gradientai/types/gpu_droplets/action_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/action_list_params.py rename to src/do_gradientai/types/gpu_droplets/action_list_params.py diff --git a/src/gradientai/types/gpu_droplets/action_list_response.py b/src/do_gradientai/types/gpu_droplets/action_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/action_list_response.py rename to src/do_gradientai/types/gpu_droplets/action_list_response.py diff --git a/src/gradientai/types/gpu_droplets/action_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/action_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/action_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/action_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/associated_resource.py b/src/do_gradientai/types/gpu_droplets/associated_resource.py similarity index 100% rename from src/gradientai/types/gpu_droplets/associated_resource.py rename to src/do_gradientai/types/gpu_droplets/associated_resource.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_create_params.py b/src/do_gradientai/types/gpu_droplets/autoscale_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_create_params.py rename to src/do_gradientai/types/gpu_droplets/autoscale_create_params.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_create_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_create_response.py rename to src/do_gradientai/types/gpu_droplets/autoscale_create_response.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_history_params.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_history_params.py similarity index 100% rename from 
src/gradientai/types/gpu_droplets/autoscale_list_history_params.py rename to src/do_gradientai/types/gpu_droplets/autoscale_list_history_params.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_history_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_history_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_list_history_response.py rename to src/do_gradientai/types/gpu_droplets/autoscale_list_history_response.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_members_params.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_members_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_list_members_params.py rename to src/do_gradientai/types/gpu_droplets/autoscale_list_members_params.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_members_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_members_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_list_members_response.py rename to src/do_gradientai/types/gpu_droplets/autoscale_list_members_response.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_params.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_list_params.py rename to src/do_gradientai/types/gpu_droplets/autoscale_list_params.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_list_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_list_response.py rename to src/do_gradientai/types/gpu_droplets/autoscale_list_response.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_pool.py rename to 
src/do_gradientai/types/gpu_droplets/autoscale_pool.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_droplet_template_param.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_dynamic_config_param.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_static_config.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_pool_static_config.py rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py b/src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py similarity index 100% rename from 
src/gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py rename to src/do_gradientai/types/gpu_droplets/autoscale_pool_static_config_param.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/autoscale_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_update_params.py b/src/do_gradientai/types/gpu_droplets/autoscale_update_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_update_params.py rename to src/do_gradientai/types/gpu_droplets/autoscale_update_params.py diff --git a/src/gradientai/types/gpu_droplets/autoscale_update_response.py b/src/do_gradientai/types/gpu_droplets/autoscale_update_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/autoscale_update_response.py rename to src/do_gradientai/types/gpu_droplets/autoscale_update_response.py diff --git a/src/gradientai/types/gpu_droplets/backup_list_params.py b/src/do_gradientai/types/gpu_droplets/backup_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/backup_list_params.py rename to src/do_gradientai/types/gpu_droplets/backup_list_params.py diff --git a/src/gradientai/types/gpu_droplets/backup_list_policies_params.py b/src/do_gradientai/types/gpu_droplets/backup_list_policies_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/backup_list_policies_params.py rename to src/do_gradientai/types/gpu_droplets/backup_list_policies_params.py diff --git a/src/gradientai/types/gpu_droplets/backup_list_policies_response.py b/src/do_gradientai/types/gpu_droplets/backup_list_policies_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/backup_list_policies_response.py rename to 
src/do_gradientai/types/gpu_droplets/backup_list_policies_response.py diff --git a/src/gradientai/types/gpu_droplets/backup_list_response.py b/src/do_gradientai/types/gpu_droplets/backup_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/backup_list_response.py rename to src/do_gradientai/types/gpu_droplets/backup_list_response.py diff --git a/src/gradientai/types/gpu_droplets/backup_list_supported_policies_response.py b/src/do_gradientai/types/gpu_droplets/backup_list_supported_policies_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/backup_list_supported_policies_response.py rename to src/do_gradientai/types/gpu_droplets/backup_list_supported_policies_response.py diff --git a/src/gradientai/types/gpu_droplets/backup_retrieve_policy_response.py b/src/do_gradientai/types/gpu_droplets/backup_retrieve_policy_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/backup_retrieve_policy_response.py rename to src/do_gradientai/types/gpu_droplets/backup_retrieve_policy_response.py diff --git a/src/gradientai/types/gpu_droplets/current_utilization.py b/src/do_gradientai/types/gpu_droplets/current_utilization.py similarity index 100% rename from src/gradientai/types/gpu_droplets/current_utilization.py rename to src/do_gradientai/types/gpu_droplets/current_utilization.py diff --git a/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py b/src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py rename to src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_check_status_response.py diff --git a/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py 
b/src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py rename to src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_delete_selective_params.py diff --git a/src/gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py b/src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py rename to src/do_gradientai/types/gpu_droplets/destroy_with_associated_resource_list_response.py diff --git a/src/gradientai/types/gpu_droplets/destroyed_associated_resource.py b/src/do_gradientai/types/gpu_droplets/destroyed_associated_resource.py similarity index 100% rename from src/gradientai/types/gpu_droplets/destroyed_associated_resource.py rename to src/do_gradientai/types/gpu_droplets/destroyed_associated_resource.py diff --git a/src/gradientai/types/gpu_droplets/domains.py b/src/do_gradientai/types/gpu_droplets/domains.py similarity index 100% rename from src/gradientai/types/gpu_droplets/domains.py rename to src/do_gradientai/types/gpu_droplets/domains.py diff --git a/src/gradientai/types/gpu_droplets/domains_param.py b/src/do_gradientai/types/gpu_droplets/domains_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/domains_param.py rename to src/do_gradientai/types/gpu_droplets/domains_param.py diff --git a/src/gradientai/types/gpu_droplets/firewall.py b/src/do_gradientai/types/gpu_droplets/firewall.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewall.py rename to src/do_gradientai/types/gpu_droplets/firewall.py diff --git a/src/gradientai/types/gpu_droplets/firewall_create_params.py b/src/do_gradientai/types/gpu_droplets/firewall_create_params.py similarity index 100% rename 
from src/gradientai/types/gpu_droplets/firewall_create_params.py rename to src/do_gradientai/types/gpu_droplets/firewall_create_params.py diff --git a/src/gradientai/types/gpu_droplets/firewall_create_response.py b/src/do_gradientai/types/gpu_droplets/firewall_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewall_create_response.py rename to src/do_gradientai/types/gpu_droplets/firewall_create_response.py diff --git a/src/gradientai/types/gpu_droplets/firewall_list_params.py b/src/do_gradientai/types/gpu_droplets/firewall_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewall_list_params.py rename to src/do_gradientai/types/gpu_droplets/firewall_list_params.py diff --git a/src/gradientai/types/gpu_droplets/firewall_list_response.py b/src/do_gradientai/types/gpu_droplets/firewall_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewall_list_response.py rename to src/do_gradientai/types/gpu_droplets/firewall_list_response.py diff --git a/src/gradientai/types/gpu_droplets/firewall_param.py b/src/do_gradientai/types/gpu_droplets/firewall_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewall_param.py rename to src/do_gradientai/types/gpu_droplets/firewall_param.py diff --git a/src/gradientai/types/gpu_droplets/firewall_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/firewall_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewall_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/firewall_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/firewall_update_params.py b/src/do_gradientai/types/gpu_droplets/firewall_update_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewall_update_params.py rename to src/do_gradientai/types/gpu_droplets/firewall_update_params.py diff --git 
a/src/gradientai/types/gpu_droplets/firewall_update_response.py b/src/do_gradientai/types/gpu_droplets/firewall_update_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewall_update_response.py rename to src/do_gradientai/types/gpu_droplets/firewall_update_response.py diff --git a/src/gradientai/types/gpu_droplets/firewalls/__init__.py b/src/do_gradientai/types/gpu_droplets/firewalls/__init__.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewalls/__init__.py rename to src/do_gradientai/types/gpu_droplets/firewalls/__init__.py diff --git a/src/gradientai/types/gpu_droplets/firewalls/droplet_add_params.py b/src/do_gradientai/types/gpu_droplets/firewalls/droplet_add_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewalls/droplet_add_params.py rename to src/do_gradientai/types/gpu_droplets/firewalls/droplet_add_params.py diff --git a/src/gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py b/src/do_gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py rename to src/do_gradientai/types/gpu_droplets/firewalls/droplet_remove_params.py diff --git a/src/gradientai/types/gpu_droplets/firewalls/rule_add_params.py b/src/do_gradientai/types/gpu_droplets/firewalls/rule_add_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewalls/rule_add_params.py rename to src/do_gradientai/types/gpu_droplets/firewalls/rule_add_params.py diff --git a/src/gradientai/types/gpu_droplets/firewalls/rule_remove_params.py b/src/do_gradientai/types/gpu_droplets/firewalls/rule_remove_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewalls/rule_remove_params.py rename to src/do_gradientai/types/gpu_droplets/firewalls/rule_remove_params.py diff --git a/src/gradientai/types/gpu_droplets/firewalls/tag_add_params.py 
b/src/do_gradientai/types/gpu_droplets/firewalls/tag_add_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewalls/tag_add_params.py rename to src/do_gradientai/types/gpu_droplets/firewalls/tag_add_params.py diff --git a/src/gradientai/types/gpu_droplets/firewalls/tag_remove_params.py b/src/do_gradientai/types/gpu_droplets/firewalls/tag_remove_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/firewalls/tag_remove_params.py rename to src/do_gradientai/types/gpu_droplets/firewalls/tag_remove_params.py diff --git a/src/gradientai/types/gpu_droplets/floating_ip.py b/src/do_gradientai/types/gpu_droplets/floating_ip.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ip.py rename to src/do_gradientai/types/gpu_droplets/floating_ip.py diff --git a/src/gradientai/types/gpu_droplets/floating_ip_create_params.py b/src/do_gradientai/types/gpu_droplets/floating_ip_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ip_create_params.py rename to src/do_gradientai/types/gpu_droplets/floating_ip_create_params.py diff --git a/src/gradientai/types/gpu_droplets/floating_ip_create_response.py b/src/do_gradientai/types/gpu_droplets/floating_ip_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ip_create_response.py rename to src/do_gradientai/types/gpu_droplets/floating_ip_create_response.py diff --git a/src/gradientai/types/gpu_droplets/floating_ip_list_params.py b/src/do_gradientai/types/gpu_droplets/floating_ip_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ip_list_params.py rename to src/do_gradientai/types/gpu_droplets/floating_ip_list_params.py diff --git a/src/gradientai/types/gpu_droplets/floating_ip_list_response.py b/src/do_gradientai/types/gpu_droplets/floating_ip_list_response.py similarity index 100% rename from 
src/gradientai/types/gpu_droplets/floating_ip_list_response.py rename to src/do_gradientai/types/gpu_droplets/floating_ip_list_response.py diff --git a/src/gradientai/types/gpu_droplets/floating_ip_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/floating_ip_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ip_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/floating_ip_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/floating_ips/__init__.py b/src/do_gradientai/types/gpu_droplets/floating_ips/__init__.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ips/__init__.py rename to src/do_gradientai/types/gpu_droplets/floating_ips/__init__.py diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_create_params.py b/src/do_gradientai/types/gpu_droplets/floating_ips/action_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ips/action_create_params.py rename to src/do_gradientai/types/gpu_droplets/floating_ips/action_create_params.py diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_create_response.py b/src/do_gradientai/types/gpu_droplets/floating_ips/action_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ips/action_create_response.py rename to src/do_gradientai/types/gpu_droplets/floating_ips/action_create_response.py diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_list_response.py b/src/do_gradientai/types/gpu_droplets/floating_ips/action_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ips/action_list_response.py rename to src/do_gradientai/types/gpu_droplets/floating_ips/action_list_response.py diff --git a/src/gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py 
similarity index 100% rename from src/gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/floating_ips/action_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/forwarding_rule.py b/src/do_gradientai/types/gpu_droplets/forwarding_rule.py similarity index 100% rename from src/gradientai/types/gpu_droplets/forwarding_rule.py rename to src/do_gradientai/types/gpu_droplets/forwarding_rule.py diff --git a/src/gradientai/types/gpu_droplets/forwarding_rule_param.py b/src/do_gradientai/types/gpu_droplets/forwarding_rule_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/forwarding_rule_param.py rename to src/do_gradientai/types/gpu_droplets/forwarding_rule_param.py diff --git a/src/gradientai/types/gpu_droplets/glb_settings.py b/src/do_gradientai/types/gpu_droplets/glb_settings.py similarity index 100% rename from src/gradientai/types/gpu_droplets/glb_settings.py rename to src/do_gradientai/types/gpu_droplets/glb_settings.py diff --git a/src/gradientai/types/gpu_droplets/glb_settings_param.py b/src/do_gradientai/types/gpu_droplets/glb_settings_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/glb_settings_param.py rename to src/do_gradientai/types/gpu_droplets/glb_settings_param.py diff --git a/src/gradientai/types/gpu_droplets/health_check.py b/src/do_gradientai/types/gpu_droplets/health_check.py similarity index 100% rename from src/gradientai/types/gpu_droplets/health_check.py rename to src/do_gradientai/types/gpu_droplets/health_check.py diff --git a/src/gradientai/types/gpu_droplets/health_check_param.py b/src/do_gradientai/types/gpu_droplets/health_check_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/health_check_param.py rename to src/do_gradientai/types/gpu_droplets/health_check_param.py diff --git a/src/gradientai/types/gpu_droplets/image_create_params.py 
b/src/do_gradientai/types/gpu_droplets/image_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/image_create_params.py rename to src/do_gradientai/types/gpu_droplets/image_create_params.py diff --git a/src/gradientai/types/gpu_droplets/image_create_response.py b/src/do_gradientai/types/gpu_droplets/image_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/image_create_response.py rename to src/do_gradientai/types/gpu_droplets/image_create_response.py diff --git a/src/gradientai/types/gpu_droplets/image_list_params.py b/src/do_gradientai/types/gpu_droplets/image_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/image_list_params.py rename to src/do_gradientai/types/gpu_droplets/image_list_params.py diff --git a/src/gradientai/types/gpu_droplets/image_list_response.py b/src/do_gradientai/types/gpu_droplets/image_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/image_list_response.py rename to src/do_gradientai/types/gpu_droplets/image_list_response.py diff --git a/src/gradientai/types/gpu_droplets/image_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/image_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/image_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/image_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/image_update_params.py b/src/do_gradientai/types/gpu_droplets/image_update_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/image_update_params.py rename to src/do_gradientai/types/gpu_droplets/image_update_params.py diff --git a/src/gradientai/types/gpu_droplets/image_update_response.py b/src/do_gradientai/types/gpu_droplets/image_update_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/image_update_response.py rename to 
src/do_gradientai/types/gpu_droplets/image_update_response.py diff --git a/src/gradientai/types/gpu_droplets/images/__init__.py b/src/do_gradientai/types/gpu_droplets/images/__init__.py similarity index 100% rename from src/gradientai/types/gpu_droplets/images/__init__.py rename to src/do_gradientai/types/gpu_droplets/images/__init__.py diff --git a/src/gradientai/types/gpu_droplets/images/action_create_params.py b/src/do_gradientai/types/gpu_droplets/images/action_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/images/action_create_params.py rename to src/do_gradientai/types/gpu_droplets/images/action_create_params.py diff --git a/src/gradientai/types/gpu_droplets/images/action_list_response.py b/src/do_gradientai/types/gpu_droplets/images/action_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/images/action_list_response.py rename to src/do_gradientai/types/gpu_droplets/images/action_list_response.py diff --git a/src/gradientai/types/gpu_droplets/lb_firewall.py b/src/do_gradientai/types/gpu_droplets/lb_firewall.py similarity index 100% rename from src/gradientai/types/gpu_droplets/lb_firewall.py rename to src/do_gradientai/types/gpu_droplets/lb_firewall.py diff --git a/src/gradientai/types/gpu_droplets/lb_firewall_param.py b/src/do_gradientai/types/gpu_droplets/lb_firewall_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/lb_firewall_param.py rename to src/do_gradientai/types/gpu_droplets/lb_firewall_param.py diff --git a/src/gradientai/types/gpu_droplets/load_balancer.py b/src/do_gradientai/types/gpu_droplets/load_balancer.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancer.py rename to src/do_gradientai/types/gpu_droplets/load_balancer.py diff --git a/src/gradientai/types/gpu_droplets/load_balancer_create_params.py b/src/do_gradientai/types/gpu_droplets/load_balancer_create_params.py similarity index 100% rename from 
src/gradientai/types/gpu_droplets/load_balancer_create_params.py rename to src/do_gradientai/types/gpu_droplets/load_balancer_create_params.py diff --git a/src/gradientai/types/gpu_droplets/load_balancer_create_response.py b/src/do_gradientai/types/gpu_droplets/load_balancer_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancer_create_response.py rename to src/do_gradientai/types/gpu_droplets/load_balancer_create_response.py diff --git a/src/gradientai/types/gpu_droplets/load_balancer_list_params.py b/src/do_gradientai/types/gpu_droplets/load_balancer_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancer_list_params.py rename to src/do_gradientai/types/gpu_droplets/load_balancer_list_params.py diff --git a/src/gradientai/types/gpu_droplets/load_balancer_list_response.py b/src/do_gradientai/types/gpu_droplets/load_balancer_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancer_list_response.py rename to src/do_gradientai/types/gpu_droplets/load_balancer_list_response.py diff --git a/src/gradientai/types/gpu_droplets/load_balancer_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/load_balancer_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancer_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/load_balancer_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/load_balancer_update_params.py b/src/do_gradientai/types/gpu_droplets/load_balancer_update_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancer_update_params.py rename to src/do_gradientai/types/gpu_droplets/load_balancer_update_params.py diff --git a/src/gradientai/types/gpu_droplets/load_balancer_update_response.py b/src/do_gradientai/types/gpu_droplets/load_balancer_update_response.py similarity index 100% rename from 
src/gradientai/types/gpu_droplets/load_balancer_update_response.py rename to src/do_gradientai/types/gpu_droplets/load_balancer_update_response.py diff --git a/src/gradientai/types/gpu_droplets/load_balancers/__init__.py b/src/do_gradientai/types/gpu_droplets/load_balancers/__init__.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancers/__init__.py rename to src/do_gradientai/types/gpu_droplets/load_balancers/__init__.py diff --git a/src/gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py b/src/do_gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py rename to src/do_gradientai/types/gpu_droplets/load_balancers/droplet_add_params.py diff --git a/src/gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py b/src/do_gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py rename to src/do_gradientai/types/gpu_droplets/load_balancers/droplet_remove_params.py diff --git a/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py b/src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py rename to src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_add_params.py diff --git a/src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py b/src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py rename to src/do_gradientai/types/gpu_droplets/load_balancers/forwarding_rule_remove_params.py diff --git 
a/src/gradientai/types/gpu_droplets/size_list_params.py b/src/do_gradientai/types/gpu_droplets/size_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/size_list_params.py rename to src/do_gradientai/types/gpu_droplets/size_list_params.py diff --git a/src/gradientai/types/gpu_droplets/size_list_response.py b/src/do_gradientai/types/gpu_droplets/size_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/size_list_response.py rename to src/do_gradientai/types/gpu_droplets/size_list_response.py diff --git a/src/gradientai/types/gpu_droplets/snapshot_list_params.py b/src/do_gradientai/types/gpu_droplets/snapshot_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/snapshot_list_params.py rename to src/do_gradientai/types/gpu_droplets/snapshot_list_params.py diff --git a/src/gradientai/types/gpu_droplets/snapshot_list_response.py b/src/do_gradientai/types/gpu_droplets/snapshot_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/snapshot_list_response.py rename to src/do_gradientai/types/gpu_droplets/snapshot_list_response.py diff --git a/src/gradientai/types/gpu_droplets/snapshot_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/snapshot_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/snapshot_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/snapshot_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/sticky_sessions.py b/src/do_gradientai/types/gpu_droplets/sticky_sessions.py similarity index 100% rename from src/gradientai/types/gpu_droplets/sticky_sessions.py rename to src/do_gradientai/types/gpu_droplets/sticky_sessions.py diff --git a/src/gradientai/types/gpu_droplets/sticky_sessions_param.py b/src/do_gradientai/types/gpu_droplets/sticky_sessions_param.py similarity index 100% rename from src/gradientai/types/gpu_droplets/sticky_sessions_param.py rename to 
src/do_gradientai/types/gpu_droplets/sticky_sessions_param.py diff --git a/src/gradientai/types/gpu_droplets/volume_create_params.py b/src/do_gradientai/types/gpu_droplets/volume_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volume_create_params.py rename to src/do_gradientai/types/gpu_droplets/volume_create_params.py diff --git a/src/gradientai/types/gpu_droplets/volume_create_response.py b/src/do_gradientai/types/gpu_droplets/volume_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volume_create_response.py rename to src/do_gradientai/types/gpu_droplets/volume_create_response.py diff --git a/src/gradientai/types/gpu_droplets/volume_delete_by_name_params.py b/src/do_gradientai/types/gpu_droplets/volume_delete_by_name_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volume_delete_by_name_params.py rename to src/do_gradientai/types/gpu_droplets/volume_delete_by_name_params.py diff --git a/src/gradientai/types/gpu_droplets/volume_list_params.py b/src/do_gradientai/types/gpu_droplets/volume_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volume_list_params.py rename to src/do_gradientai/types/gpu_droplets/volume_list_params.py diff --git a/src/gradientai/types/gpu_droplets/volume_list_response.py b/src/do_gradientai/types/gpu_droplets/volume_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volume_list_response.py rename to src/do_gradientai/types/gpu_droplets/volume_list_response.py diff --git a/src/gradientai/types/gpu_droplets/volume_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/volume_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volume_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/volume_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/volumes/__init__.py 
b/src/do_gradientai/types/gpu_droplets/volumes/__init__.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/__init__.py rename to src/do_gradientai/types/gpu_droplets/volumes/__init__.py diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py b/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py rename to src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_params.py diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py b/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py rename to src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_id_response.py diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py b/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py rename to src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_params.py diff --git a/src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py b/src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py rename to src/do_gradientai/types/gpu_droplets/volumes/action_initiate_by_name_response.py diff --git a/src/gradientai/types/gpu_droplets/volumes/action_list_params.py b/src/do_gradientai/types/gpu_droplets/volumes/action_list_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/action_list_params.py rename to 
src/do_gradientai/types/gpu_droplets/volumes/action_list_params.py diff --git a/src/gradientai/types/gpu_droplets/volumes/action_list_response.py b/src/do_gradientai/types/gpu_droplets/volumes/action_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/action_list_response.py rename to src/do_gradientai/types/gpu_droplets/volumes/action_list_response.py diff --git a/src/gradientai/types/gpu_droplets/volumes/action_retrieve_params.py b/src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/action_retrieve_params.py rename to src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_params.py diff --git a/src/gradientai/types/gpu_droplets/volumes/action_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/action_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/volumes/action_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_create_params.py b/src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_params.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/snapshot_create_params.py rename to src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_params.py diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_create_response.py b/src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/snapshot_create_response.py rename to src/do_gradientai/types/gpu_droplets/volumes/snapshot_create_response.py diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_list_params.py b/src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_params.py similarity index 100% rename from 
src/gradientai/types/gpu_droplets/volumes/snapshot_list_params.py rename to src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_params.py diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_list_response.py b/src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/snapshot_list_response.py rename to src/do_gradientai/types/gpu_droplets/volumes/snapshot_list_response.py diff --git a/src/gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py b/src/do_gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py rename to src/do_gradientai/types/gpu_droplets/volumes/snapshot_retrieve_response.py diff --git a/src/gradientai/types/gpu_droplets/volumes/volume_action.py b/src/do_gradientai/types/gpu_droplets/volumes/volume_action.py similarity index 100% rename from src/gradientai/types/gpu_droplets/volumes/volume_action.py rename to src/do_gradientai/types/gpu_droplets/volumes/volume_action.py diff --git a/src/gradientai/types/inference/__init__.py b/src/do_gradientai/types/inference/__init__.py similarity index 100% rename from src/gradientai/types/inference/__init__.py rename to src/do_gradientai/types/inference/__init__.py diff --git a/src/gradientai/types/inference/api_key_create_params.py b/src/do_gradientai/types/inference/api_key_create_params.py similarity index 100% rename from src/gradientai/types/inference/api_key_create_params.py rename to src/do_gradientai/types/inference/api_key_create_params.py diff --git a/src/gradientai/types/inference/api_key_create_response.py b/src/do_gradientai/types/inference/api_key_create_response.py similarity index 100% rename from src/gradientai/types/inference/api_key_create_response.py rename to src/do_gradientai/types/inference/api_key_create_response.py diff --git 
a/src/gradientai/types/inference/api_key_delete_response.py b/src/do_gradientai/types/inference/api_key_delete_response.py similarity index 100% rename from src/gradientai/types/inference/api_key_delete_response.py rename to src/do_gradientai/types/inference/api_key_delete_response.py diff --git a/src/gradientai/types/inference/api_key_list_params.py b/src/do_gradientai/types/inference/api_key_list_params.py similarity index 100% rename from src/gradientai/types/inference/api_key_list_params.py rename to src/do_gradientai/types/inference/api_key_list_params.py diff --git a/src/gradientai/types/inference/api_key_list_response.py b/src/do_gradientai/types/inference/api_key_list_response.py similarity index 100% rename from src/gradientai/types/inference/api_key_list_response.py rename to src/do_gradientai/types/inference/api_key_list_response.py diff --git a/src/gradientai/types/inference/api_key_update_params.py b/src/do_gradientai/types/inference/api_key_update_params.py similarity index 100% rename from src/gradientai/types/inference/api_key_update_params.py rename to src/do_gradientai/types/inference/api_key_update_params.py diff --git a/src/gradientai/types/inference/api_key_update_regenerate_response.py b/src/do_gradientai/types/inference/api_key_update_regenerate_response.py similarity index 100% rename from src/gradientai/types/inference/api_key_update_regenerate_response.py rename to src/do_gradientai/types/inference/api_key_update_regenerate_response.py diff --git a/src/gradientai/types/inference/api_key_update_response.py b/src/do_gradientai/types/inference/api_key_update_response.py similarity index 100% rename from src/gradientai/types/inference/api_key_update_response.py rename to src/do_gradientai/types/inference/api_key_update_response.py diff --git a/src/gradientai/types/inference/api_model_api_key_info.py b/src/do_gradientai/types/inference/api_model_api_key_info.py similarity index 100% rename from 
src/gradientai/types/inference/api_model_api_key_info.py rename to src/do_gradientai/types/inference/api_model_api_key_info.py diff --git a/src/gradientai/types/knowledge_base_create_params.py b/src/do_gradientai/types/knowledge_base_create_params.py similarity index 100% rename from src/gradientai/types/knowledge_base_create_params.py rename to src/do_gradientai/types/knowledge_base_create_params.py diff --git a/src/gradientai/types/knowledge_base_create_response.py b/src/do_gradientai/types/knowledge_base_create_response.py similarity index 100% rename from src/gradientai/types/knowledge_base_create_response.py rename to src/do_gradientai/types/knowledge_base_create_response.py diff --git a/src/gradientai/types/knowledge_base_delete_response.py b/src/do_gradientai/types/knowledge_base_delete_response.py similarity index 100% rename from src/gradientai/types/knowledge_base_delete_response.py rename to src/do_gradientai/types/knowledge_base_delete_response.py diff --git a/src/gradientai/types/knowledge_base_list_params.py b/src/do_gradientai/types/knowledge_base_list_params.py similarity index 100% rename from src/gradientai/types/knowledge_base_list_params.py rename to src/do_gradientai/types/knowledge_base_list_params.py diff --git a/src/gradientai/types/knowledge_base_list_response.py b/src/do_gradientai/types/knowledge_base_list_response.py similarity index 100% rename from src/gradientai/types/knowledge_base_list_response.py rename to src/do_gradientai/types/knowledge_base_list_response.py diff --git a/src/gradientai/types/knowledge_base_retrieve_response.py b/src/do_gradientai/types/knowledge_base_retrieve_response.py similarity index 100% rename from src/gradientai/types/knowledge_base_retrieve_response.py rename to src/do_gradientai/types/knowledge_base_retrieve_response.py diff --git a/src/gradientai/types/knowledge_base_update_params.py b/src/do_gradientai/types/knowledge_base_update_params.py similarity index 100% rename from 
src/gradientai/types/knowledge_base_update_params.py rename to src/do_gradientai/types/knowledge_base_update_params.py diff --git a/src/gradientai/types/knowledge_base_update_response.py b/src/do_gradientai/types/knowledge_base_update_response.py similarity index 100% rename from src/gradientai/types/knowledge_base_update_response.py rename to src/do_gradientai/types/knowledge_base_update_response.py diff --git a/src/gradientai/types/knowledge_bases/__init__.py b/src/do_gradientai/types/knowledge_bases/__init__.py similarity index 100% rename from src/gradientai/types/knowledge_bases/__init__.py rename to src/do_gradientai/types/knowledge_bases/__init__.py diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source.py b/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_file_upload_data_source.py rename to src/do_gradientai/types/knowledge_bases/api_file_upload_data_source.py diff --git a/src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py b/src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_file_upload_data_source_param.py rename to src/do_gradientai/types/knowledge_bases/api_file_upload_data_source_param.py diff --git a/src/gradientai/types/knowledge_bases/api_indexed_data_source.py b/src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_indexed_data_source.py rename to src/do_gradientai/types/knowledge_bases/api_indexed_data_source.py diff --git a/src/gradientai/types/knowledge_bases/api_indexing_job.py b/src/do_gradientai/types/knowledge_bases/api_indexing_job.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_indexing_job.py rename to src/do_gradientai/types/knowledge_bases/api_indexing_job.py diff --git 
a/src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py b/src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_knowledge_base_data_source.py rename to src/do_gradientai/types/knowledge_bases/api_knowledge_base_data_source.py diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source.py b/src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_spaces_data_source.py rename to src/do_gradientai/types/knowledge_bases/api_spaces_data_source.py diff --git a/src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py b/src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_spaces_data_source_param.py rename to src/do_gradientai/types/knowledge_bases/api_spaces_data_source_param.py diff --git a/src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py b/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_web_crawler_data_source.py rename to src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source.py diff --git a/src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py b/src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py similarity index 100% rename from src/gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py rename to src/do_gradientai/types/knowledge_bases/api_web_crawler_data_source_param.py diff --git a/src/gradientai/types/knowledge_bases/aws_data_source_param.py b/src/do_gradientai/types/knowledge_bases/aws_data_source_param.py similarity index 100% rename from src/gradientai/types/knowledge_bases/aws_data_source_param.py rename to 
src/do_gradientai/types/knowledge_bases/aws_data_source_param.py diff --git a/src/gradientai/types/knowledge_bases/data_source_create_params.py b/src/do_gradientai/types/knowledge_bases/data_source_create_params.py similarity index 100% rename from src/gradientai/types/knowledge_bases/data_source_create_params.py rename to src/do_gradientai/types/knowledge_bases/data_source_create_params.py diff --git a/src/gradientai/types/knowledge_bases/data_source_create_response.py b/src/do_gradientai/types/knowledge_bases/data_source_create_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/data_source_create_response.py rename to src/do_gradientai/types/knowledge_bases/data_source_create_response.py diff --git a/src/gradientai/types/knowledge_bases/data_source_delete_response.py b/src/do_gradientai/types/knowledge_bases/data_source_delete_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/data_source_delete_response.py rename to src/do_gradientai/types/knowledge_bases/data_source_delete_response.py diff --git a/src/gradientai/types/knowledge_bases/data_source_list_params.py b/src/do_gradientai/types/knowledge_bases/data_source_list_params.py similarity index 100% rename from src/gradientai/types/knowledge_bases/data_source_list_params.py rename to src/do_gradientai/types/knowledge_bases/data_source_list_params.py diff --git a/src/gradientai/types/knowledge_bases/data_source_list_response.py b/src/do_gradientai/types/knowledge_bases/data_source_list_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/data_source_list_response.py rename to src/do_gradientai/types/knowledge_bases/data_source_list_response.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_create_params.py b/src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_create_params.py rename to 
src/do_gradientai/types/knowledge_bases/indexing_job_create_params.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_create_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_create_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_create_response.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_list_params.py b/src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_list_params.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_list_params.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_list_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_list_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_list_response.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_data_sources_response.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_retrieve_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_retrieve_response.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py b/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py similarity 
index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_params.py diff --git a/src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py b/src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py similarity index 100% rename from src/gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py rename to src/do_gradientai/types/knowledge_bases/indexing_job_update_cancel_response.py diff --git a/src/gradientai/types/model_list_response.py b/src/do_gradientai/types/model_list_response.py similarity index 100% rename from src/gradientai/types/model_list_response.py rename to src/do_gradientai/types/model_list_response.py diff --git a/src/gradientai/types/model_retrieve_response.py b/src/do_gradientai/types/model_retrieve_response.py similarity index 100% rename from src/gradientai/types/model_retrieve_response.py rename to src/do_gradientai/types/model_retrieve_response.py diff --git a/src/gradientai/types/models/__init__.py b/src/do_gradientai/types/models/__init__.py similarity index 100% rename from src/gradientai/types/models/__init__.py rename to src/do_gradientai/types/models/__init__.py diff --git a/src/gradientai/types/models/providers/__init__.py b/src/do_gradientai/types/models/providers/__init__.py similarity index 100% rename from src/gradientai/types/models/providers/__init__.py rename to src/do_gradientai/types/models/providers/__init__.py diff --git a/src/gradientai/types/models/providers/anthropic_create_params.py b/src/do_gradientai/types/models/providers/anthropic_create_params.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_create_params.py rename to src/do_gradientai/types/models/providers/anthropic_create_params.py diff --git a/src/gradientai/types/models/providers/anthropic_create_response.py 
b/src/do_gradientai/types/models/providers/anthropic_create_response.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_create_response.py rename to src/do_gradientai/types/models/providers/anthropic_create_response.py diff --git a/src/gradientai/types/models/providers/anthropic_delete_response.py b/src/do_gradientai/types/models/providers/anthropic_delete_response.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_delete_response.py rename to src/do_gradientai/types/models/providers/anthropic_delete_response.py diff --git a/src/gradientai/types/models/providers/anthropic_list_agents_params.py b/src/do_gradientai/types/models/providers/anthropic_list_agents_params.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_list_agents_params.py rename to src/do_gradientai/types/models/providers/anthropic_list_agents_params.py diff --git a/src/gradientai/types/models/providers/anthropic_list_agents_response.py b/src/do_gradientai/types/models/providers/anthropic_list_agents_response.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_list_agents_response.py rename to src/do_gradientai/types/models/providers/anthropic_list_agents_response.py diff --git a/src/gradientai/types/models/providers/anthropic_list_params.py b/src/do_gradientai/types/models/providers/anthropic_list_params.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_list_params.py rename to src/do_gradientai/types/models/providers/anthropic_list_params.py diff --git a/src/gradientai/types/models/providers/anthropic_list_response.py b/src/do_gradientai/types/models/providers/anthropic_list_response.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_list_response.py rename to src/do_gradientai/types/models/providers/anthropic_list_response.py diff --git 
a/src/gradientai/types/models/providers/anthropic_retrieve_response.py b/src/do_gradientai/types/models/providers/anthropic_retrieve_response.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_retrieve_response.py rename to src/do_gradientai/types/models/providers/anthropic_retrieve_response.py diff --git a/src/gradientai/types/models/providers/anthropic_update_params.py b/src/do_gradientai/types/models/providers/anthropic_update_params.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_update_params.py rename to src/do_gradientai/types/models/providers/anthropic_update_params.py diff --git a/src/gradientai/types/models/providers/anthropic_update_response.py b/src/do_gradientai/types/models/providers/anthropic_update_response.py similarity index 100% rename from src/gradientai/types/models/providers/anthropic_update_response.py rename to src/do_gradientai/types/models/providers/anthropic_update_response.py diff --git a/src/gradientai/types/models/providers/openai_create_params.py b/src/do_gradientai/types/models/providers/openai_create_params.py similarity index 100% rename from src/gradientai/types/models/providers/openai_create_params.py rename to src/do_gradientai/types/models/providers/openai_create_params.py diff --git a/src/gradientai/types/models/providers/openai_create_response.py b/src/do_gradientai/types/models/providers/openai_create_response.py similarity index 100% rename from src/gradientai/types/models/providers/openai_create_response.py rename to src/do_gradientai/types/models/providers/openai_create_response.py diff --git a/src/gradientai/types/models/providers/openai_delete_response.py b/src/do_gradientai/types/models/providers/openai_delete_response.py similarity index 100% rename from src/gradientai/types/models/providers/openai_delete_response.py rename to src/do_gradientai/types/models/providers/openai_delete_response.py diff --git 
a/src/gradientai/types/models/providers/openai_list_params.py b/src/do_gradientai/types/models/providers/openai_list_params.py similarity index 100% rename from src/gradientai/types/models/providers/openai_list_params.py rename to src/do_gradientai/types/models/providers/openai_list_params.py diff --git a/src/gradientai/types/models/providers/openai_list_response.py b/src/do_gradientai/types/models/providers/openai_list_response.py similarity index 100% rename from src/gradientai/types/models/providers/openai_list_response.py rename to src/do_gradientai/types/models/providers/openai_list_response.py diff --git a/src/gradientai/types/models/providers/openai_retrieve_agents_params.py b/src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py similarity index 100% rename from src/gradientai/types/models/providers/openai_retrieve_agents_params.py rename to src/do_gradientai/types/models/providers/openai_retrieve_agents_params.py diff --git a/src/gradientai/types/models/providers/openai_retrieve_agents_response.py b/src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py similarity index 100% rename from src/gradientai/types/models/providers/openai_retrieve_agents_response.py rename to src/do_gradientai/types/models/providers/openai_retrieve_agents_response.py diff --git a/src/gradientai/types/models/providers/openai_retrieve_response.py b/src/do_gradientai/types/models/providers/openai_retrieve_response.py similarity index 100% rename from src/gradientai/types/models/providers/openai_retrieve_response.py rename to src/do_gradientai/types/models/providers/openai_retrieve_response.py diff --git a/src/gradientai/types/models/providers/openai_update_params.py b/src/do_gradientai/types/models/providers/openai_update_params.py similarity index 100% rename from src/gradientai/types/models/providers/openai_update_params.py rename to src/do_gradientai/types/models/providers/openai_update_params.py diff --git 
a/src/gradientai/types/models/providers/openai_update_response.py b/src/do_gradientai/types/models/providers/openai_update_response.py similarity index 100% rename from src/gradientai/types/models/providers/openai_update_response.py rename to src/do_gradientai/types/models/providers/openai_update_response.py diff --git a/src/gradientai/types/region_list_params.py b/src/do_gradientai/types/region_list_params.py similarity index 100% rename from src/gradientai/types/region_list_params.py rename to src/do_gradientai/types/region_list_params.py diff --git a/src/gradientai/types/region_list_response.py b/src/do_gradientai/types/region_list_response.py similarity index 100% rename from src/gradientai/types/region_list_response.py rename to src/do_gradientai/types/region_list_response.py diff --git a/src/gradientai/types/shared/__init__.py b/src/do_gradientai/types/shared/__init__.py similarity index 100% rename from src/gradientai/types/shared/__init__.py rename to src/do_gradientai/types/shared/__init__.py diff --git a/src/gradientai/types/shared/action.py b/src/do_gradientai/types/shared/action.py similarity index 100% rename from src/gradientai/types/shared/action.py rename to src/do_gradientai/types/shared/action.py diff --git a/src/gradientai/types/shared/action_link.py b/src/do_gradientai/types/shared/action_link.py similarity index 100% rename from src/gradientai/types/shared/action_link.py rename to src/do_gradientai/types/shared/action_link.py diff --git a/src/gradientai/types/shared/api_links.py b/src/do_gradientai/types/shared/api_links.py similarity index 100% rename from src/gradientai/types/shared/api_links.py rename to src/do_gradientai/types/shared/api_links.py diff --git a/src/gradientai/types/shared/api_meta.py b/src/do_gradientai/types/shared/api_meta.py similarity index 100% rename from src/gradientai/types/shared/api_meta.py rename to src/do_gradientai/types/shared/api_meta.py diff --git a/src/gradientai/types/shared/backward_links.py 
b/src/do_gradientai/types/shared/backward_links.py similarity index 100% rename from src/gradientai/types/shared/backward_links.py rename to src/do_gradientai/types/shared/backward_links.py diff --git a/src/gradientai/types/shared/chat_completion_chunk.py b/src/do_gradientai/types/shared/chat_completion_chunk.py similarity index 100% rename from src/gradientai/types/shared/chat_completion_chunk.py rename to src/do_gradientai/types/shared/chat_completion_chunk.py diff --git a/src/gradientai/types/shared/chat_completion_token_logprob.py b/src/do_gradientai/types/shared/chat_completion_token_logprob.py similarity index 100% rename from src/gradientai/types/shared/chat_completion_token_logprob.py rename to src/do_gradientai/types/shared/chat_completion_token_logprob.py diff --git a/src/gradientai/types/shared/completion_usage.py b/src/do_gradientai/types/shared/completion_usage.py similarity index 100% rename from src/gradientai/types/shared/completion_usage.py rename to src/do_gradientai/types/shared/completion_usage.py diff --git a/src/gradientai/types/shared/disk_info.py b/src/do_gradientai/types/shared/disk_info.py similarity index 100% rename from src/gradientai/types/shared/disk_info.py rename to src/do_gradientai/types/shared/disk_info.py diff --git a/src/gradientai/types/shared/droplet.py b/src/do_gradientai/types/shared/droplet.py similarity index 100% rename from src/gradientai/types/shared/droplet.py rename to src/do_gradientai/types/shared/droplet.py diff --git a/src/gradientai/types/shared/droplet_next_backup_window.py b/src/do_gradientai/types/shared/droplet_next_backup_window.py similarity index 100% rename from src/gradientai/types/shared/droplet_next_backup_window.py rename to src/do_gradientai/types/shared/droplet_next_backup_window.py diff --git a/src/gradientai/types/shared/firewall_rule_target.py b/src/do_gradientai/types/shared/firewall_rule_target.py similarity index 100% rename from src/gradientai/types/shared/firewall_rule_target.py rename to 
src/do_gradientai/types/shared/firewall_rule_target.py diff --git a/src/gradientai/types/shared/forward_links.py b/src/do_gradientai/types/shared/forward_links.py similarity index 100% rename from src/gradientai/types/shared/forward_links.py rename to src/do_gradientai/types/shared/forward_links.py diff --git a/src/gradientai/types/shared/garbage_collection.py b/src/do_gradientai/types/shared/garbage_collection.py similarity index 100% rename from src/gradientai/types/shared/garbage_collection.py rename to src/do_gradientai/types/shared/garbage_collection.py diff --git a/src/gradientai/types/shared/gpu_info.py b/src/do_gradientai/types/shared/gpu_info.py similarity index 100% rename from src/gradientai/types/shared/gpu_info.py rename to src/do_gradientai/types/shared/gpu_info.py diff --git a/src/gradientai/types/shared/image.py b/src/do_gradientai/types/shared/image.py similarity index 100% rename from src/gradientai/types/shared/image.py rename to src/do_gradientai/types/shared/image.py diff --git a/src/gradientai/types/shared/kernel.py b/src/do_gradientai/types/shared/kernel.py similarity index 100% rename from src/gradientai/types/shared/kernel.py rename to src/do_gradientai/types/shared/kernel.py diff --git a/src/gradientai/types/shared/meta_properties.py b/src/do_gradientai/types/shared/meta_properties.py similarity index 100% rename from src/gradientai/types/shared/meta_properties.py rename to src/do_gradientai/types/shared/meta_properties.py diff --git a/src/gradientai/types/shared/network_v4.py b/src/do_gradientai/types/shared/network_v4.py similarity index 100% rename from src/gradientai/types/shared/network_v4.py rename to src/do_gradientai/types/shared/network_v4.py diff --git a/src/gradientai/types/shared/network_v6.py b/src/do_gradientai/types/shared/network_v6.py similarity index 100% rename from src/gradientai/types/shared/network_v6.py rename to src/do_gradientai/types/shared/network_v6.py diff --git a/src/gradientai/types/shared/page_links.py 
b/src/do_gradientai/types/shared/page_links.py similarity index 100% rename from src/gradientai/types/shared/page_links.py rename to src/do_gradientai/types/shared/page_links.py diff --git a/src/gradientai/types/shared/region.py b/src/do_gradientai/types/shared/region.py similarity index 100% rename from src/gradientai/types/shared/region.py rename to src/do_gradientai/types/shared/region.py diff --git a/src/gradientai/types/shared/size.py b/src/do_gradientai/types/shared/size.py similarity index 100% rename from src/gradientai/types/shared/size.py rename to src/do_gradientai/types/shared/size.py diff --git a/src/gradientai/types/shared/snapshots.py b/src/do_gradientai/types/shared/snapshots.py similarity index 100% rename from src/gradientai/types/shared/snapshots.py rename to src/do_gradientai/types/shared/snapshots.py diff --git a/src/gradientai/types/shared/subscription.py b/src/do_gradientai/types/shared/subscription.py similarity index 100% rename from src/gradientai/types/shared/subscription.py rename to src/do_gradientai/types/shared/subscription.py diff --git a/src/gradientai/types/shared/subscription_tier_base.py b/src/do_gradientai/types/shared/subscription_tier_base.py similarity index 100% rename from src/gradientai/types/shared/subscription_tier_base.py rename to src/do_gradientai/types/shared/subscription_tier_base.py diff --git a/src/gradientai/types/shared/vpc_peering.py b/src/do_gradientai/types/shared/vpc_peering.py similarity index 100% rename from src/gradientai/types/shared/vpc_peering.py rename to src/do_gradientai/types/shared/vpc_peering.py diff --git a/src/gradientai/types/shared_params/__init__.py b/src/do_gradientai/types/shared_params/__init__.py similarity index 100% rename from src/gradientai/types/shared_params/__init__.py rename to src/do_gradientai/types/shared_params/__init__.py diff --git a/src/gradientai/types/shared_params/firewall_rule_target.py b/src/do_gradientai/types/shared_params/firewall_rule_target.py similarity index 
100% rename from src/gradientai/types/shared_params/firewall_rule_target.py rename to src/do_gradientai/types/shared_params/firewall_rule_target.py diff --git a/tests/api_resources/agents/chat/test_completions.py b/tests/api_resources/agents/chat/test_completions.py index 06342867..de43cc34 100644 --- a/tests/api_resources/agents/chat/test_completions.py +++ b/tests/api_resources/agents/chat/test_completions.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents.chat import CompletionCreateResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents.chat import CompletionCreateResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/agents/evaluation_metrics/test_models.py b/tests/api_resources/agents/evaluation_metrics/test_models.py index 6b8f8bc7..27ab4a27 100644 --- a/tests/api_resources/agents/evaluation_metrics/test_models.py +++ b/tests/api_resources/agents/evaluation_metrics/test_models.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents.evaluation_metrics import ModelListResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents.evaluation_metrics import ModelListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py index ea39c474..2728393e 100644 --- a/tests/api_resources/agents/evaluation_metrics/test_workspaces.py +++ b/tests/api_resources/agents/evaluation_metrics/test_workspaces.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from 
gradientai.types.agents.evaluation_metrics import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents.evaluation_metrics import ( WorkspaceListResponse, WorkspaceCreateResponse, WorkspaceDeleteResponse, diff --git a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py index 635721b3..37d39018 100644 --- a/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py +++ b/tests/api_resources/agents/evaluation_metrics/workspaces/test_agents.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents.evaluation_metrics.workspaces import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents.evaluation_metrics.workspaces import ( AgentListResponse, AgentMoveResponse, ) diff --git a/tests/api_resources/agents/test_api_keys.py b/tests/api_resources/agents/test_api_keys.py index c29511f5..1e5275fe 100644 --- a/tests/api_resources/agents/test_api_keys.py +++ b/tests/api_resources/agents/test_api_keys.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents import ( APIKeyListResponse, APIKeyCreateResponse, APIKeyDeleteResponse, diff --git a/tests/api_resources/agents/test_evaluation_datasets.py b/tests/api_resources/agents/test_evaluation_datasets.py index 0413591e..56edd598 100644 --- a/tests/api_resources/agents/test_evaluation_datasets.py +++ b/tests/api_resources/agents/test_evaluation_datasets.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from do_gradientai import GradientAI, 
AsyncGradientAI +from do_gradientai.types.agents import ( EvaluationDatasetCreateResponse, EvaluationDatasetCreateFileUploadPresignedURLsResponse, ) diff --git a/tests/api_resources/agents/test_evaluation_metrics.py b/tests/api_resources/agents/test_evaluation_metrics.py index d64367ae..303d85d6 100644 --- a/tests/api_resources/agents/test_evaluation_metrics.py +++ b/tests/api_resources/agents/test_evaluation_metrics.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents import ( EvaluationMetricListResponse, EvaluationMetricListRegionsResponse, ) diff --git a/tests/api_resources/agents/test_evaluation_runs.py b/tests/api_resources/agents/test_evaluation_runs.py index 2ea44e6b..9d443f16 100644 --- a/tests/api_resources/agents/test_evaluation_runs.py +++ b/tests/api_resources/agents/test_evaluation_runs.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents import ( EvaluationRunCreateResponse, EvaluationRunRetrieveResponse, EvaluationRunListResultsResponse, diff --git a/tests/api_resources/agents/test_evaluation_test_cases.py b/tests/api_resources/agents/test_evaluation_test_cases.py index e9083ba3..ae986abc 100644 --- a/tests/api_resources/agents/test_evaluation_test_cases.py +++ b/tests/api_resources/agents/test_evaluation_test_cases.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents import ( EvaluationTestCaseListResponse, EvaluationTestCaseCreateResponse, 
EvaluationTestCaseUpdateResponse, diff --git a/tests/api_resources/agents/test_functions.py b/tests/api_resources/agents/test_functions.py index 4390d1d2..624446e0 100644 --- a/tests/api_resources/agents/test_functions.py +++ b/tests/api_resources/agents/test_functions.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents import ( FunctionCreateResponse, FunctionDeleteResponse, FunctionUpdateResponse, diff --git a/tests/api_resources/agents/test_knowledge_bases.py b/tests/api_resources/agents/test_knowledge_bases.py index 2ac20d89..7ac99316 100644 --- a/tests/api_resources/agents/test_knowledge_bases.py +++ b/tests/api_resources/agents/test_knowledge_bases.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents import APILinkKnowledgeBaseOutput, KnowledgeBaseDetachResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/agents/test_routes.py b/tests/api_resources/agents/test_routes.py index d04e8c90..256a4757 100644 --- a/tests/api_resources/agents/test_routes.py +++ b/tests/api_resources/agents/test_routes.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents import ( RouteAddResponse, RouteViewResponse, RouteDeleteResponse, diff --git a/tests/api_resources/agents/test_versions.py b/tests/api_resources/agents/test_versions.py index d6151470..158856ed 100644 --- 
a/tests/api_resources/agents/test_versions.py +++ b/tests/api_resources/agents/test_versions.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.agents import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.agents import ( VersionListResponse, VersionUpdateResponse, ) diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 46c8b431..95b02106 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.chat import CompletionCreateResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.chat import CompletionCreateResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/gpu_droplets/account/test_keys.py b/tests/api_resources/gpu_droplets/account/test_keys.py index acad3575..cf168f61 100644 --- a/tests/api_resources/gpu_droplets/account/test_keys.py +++ b/tests/api_resources/gpu_droplets/account/test_keys.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets.account import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets.account import ( KeyListResponse, KeyCreateResponse, KeyUpdateResponse, diff --git a/tests/api_resources/gpu_droplets/firewalls/test_droplets.py b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py index 67d132aa..819a5e6e 100644 --- a/tests/api_resources/gpu_droplets/firewalls/test_droplets.py +++ b/tests/api_resources/gpu_droplets/firewalls/test_droplets.py @@ -7,7 +7,7 @@ import pytest -from gradientai import 
GradientAI, AsyncGradientAI +from do_gradientai import GradientAI, AsyncGradientAI base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/gpu_droplets/firewalls/test_rules.py b/tests/api_resources/gpu_droplets/firewalls/test_rules.py index 446a11af..b2eab40c 100644 --- a/tests/api_resources/gpu_droplets/firewalls/test_rules.py +++ b/tests/api_resources/gpu_droplets/firewalls/test_rules.py @@ -7,7 +7,7 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI +from do_gradientai import GradientAI, AsyncGradientAI base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/gpu_droplets/firewalls/test_tags.py b/tests/api_resources/gpu_droplets/firewalls/test_tags.py index a0227c61..25c9362b 100644 --- a/tests/api_resources/gpu_droplets/firewalls/test_tags.py +++ b/tests/api_resources/gpu_droplets/firewalls/test_tags.py @@ -7,7 +7,7 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI +from do_gradientai import GradientAI, AsyncGradientAI base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/gpu_droplets/floating_ips/test_actions.py b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py index 82a12d2e..ad26db8a 100644 --- a/tests/api_resources/gpu_droplets/floating_ips/test_actions.py +++ b/tests/api_resources/gpu_droplets/floating_ips/test_actions.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets.floating_ips import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets.floating_ips import ( ActionListResponse, ActionCreateResponse, ActionRetrieveResponse, diff --git a/tests/api_resources/gpu_droplets/images/test_actions.py b/tests/api_resources/gpu_droplets/images/test_actions.py index 4d59c85b..35861bcb 100644 --- 
a/tests/api_resources/gpu_droplets/images/test_actions.py +++ b/tests/api_resources/gpu_droplets/images/test_actions.py @@ -7,10 +7,10 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.shared import Action -from gradientai.types.gpu_droplets.images import ActionListResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.shared import Action +from do_gradientai.types.gpu_droplets.images import ActionListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py index 333567f4..f22213e2 100644 --- a/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py +++ b/tests/api_resources/gpu_droplets/load_balancers/test_droplets.py @@ -7,7 +7,7 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI +from do_gradientai import GradientAI, AsyncGradientAI base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py index ec6f7838..d53bd0db 100644 --- a/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py +++ b/tests/api_resources/gpu_droplets/load_balancers/test_forwarding_rules.py @@ -7,7 +7,7 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI +from do_gradientai import GradientAI, AsyncGradientAI base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/gpu_droplets/test_actions.py b/tests/api_resources/gpu_droplets/test_actions.py index 5e443dd8..74e45b44 100644 --- a/tests/api_resources/gpu_droplets/test_actions.py +++ b/tests/api_resources/gpu_droplets/test_actions.py @@ -7,9 +7,9 @@ import pytest -from gradientai 
import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import ( ActionListResponse, ActionInitiateResponse, ActionRetrieveResponse, diff --git a/tests/api_resources/gpu_droplets/test_autoscale.py b/tests/api_resources/gpu_droplets/test_autoscale.py index 42164666..cec0371d 100644 --- a/tests/api_resources/gpu_droplets/test_autoscale.py +++ b/tests/api_resources/gpu_droplets/test_autoscale.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import ( AutoscaleListResponse, AutoscaleCreateResponse, AutoscaleUpdateResponse, diff --git a/tests/api_resources/gpu_droplets/test_backups.py b/tests/api_resources/gpu_droplets/test_backups.py index f8f72140..334c701f 100644 --- a/tests/api_resources/gpu_droplets/test_backups.py +++ b/tests/api_resources/gpu_droplets/test_backups.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import ( BackupListResponse, BackupListPoliciesResponse, BackupRetrievePolicyResponse, diff --git a/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py index b6922feb..2aef1fce 100644 --- a/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py +++ b/tests/api_resources/gpu_droplets/test_destroy_with_associated_resources.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type 
-from gradientai.types.gpu_droplets import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import ( DestroyWithAssociatedResourceListResponse, DestroyWithAssociatedResourceCheckStatusResponse, ) diff --git a/tests/api_resources/gpu_droplets/test_firewalls.py b/tests/api_resources/gpu_droplets/test_firewalls.py index 537fe7d2..6d98ebe8 100644 --- a/tests/api_resources/gpu_droplets/test_firewalls.py +++ b/tests/api_resources/gpu_droplets/test_firewalls.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import ( FirewallListResponse, FirewallCreateResponse, FirewallUpdateResponse, diff --git a/tests/api_resources/gpu_droplets/test_floating_ips.py b/tests/api_resources/gpu_droplets/test_floating_ips.py index 830e9b39..9b8b3183 100644 --- a/tests/api_resources/gpu_droplets/test_floating_ips.py +++ b/tests/api_resources/gpu_droplets/test_floating_ips.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import ( FloatingIPListResponse, FloatingIPCreateResponse, FloatingIPRetrieveResponse, diff --git a/tests/api_resources/gpu_droplets/test_images.py b/tests/api_resources/gpu_droplets/test_images.py index 7be6a786..5a2a7c0c 100644 --- a/tests/api_resources/gpu_droplets/test_images.py +++ b/tests/api_resources/gpu_droplets/test_images.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import 
( ImageListResponse, ImageCreateResponse, ImageUpdateResponse, diff --git a/tests/api_resources/gpu_droplets/test_load_balancers.py b/tests/api_resources/gpu_droplets/test_load_balancers.py index c1ce1ce2..b96c6d52 100644 --- a/tests/api_resources/gpu_droplets/test_load_balancers.py +++ b/tests/api_resources/gpu_droplets/test_load_balancers.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import ( LoadBalancerListResponse, LoadBalancerCreateResponse, LoadBalancerUpdateResponse, diff --git a/tests/api_resources/gpu_droplets/test_sizes.py b/tests/api_resources/gpu_droplets/test_sizes.py index eda73b1e..1ff11cd7 100644 --- a/tests/api_resources/gpu_droplets/test_sizes.py +++ b/tests/api_resources/gpu_droplets/test_sizes.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import SizeListResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import SizeListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/gpu_droplets/test_snapshots.py b/tests/api_resources/gpu_droplets/test_snapshots.py index 5d7132c2..413dd993 100644 --- a/tests/api_resources/gpu_droplets/test_snapshots.py +++ b/tests/api_resources/gpu_droplets/test_snapshots.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import SnapshotListResponse, SnapshotRetrieveResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import SnapshotListResponse, SnapshotRetrieveResponse base_url = os.environ.get("TEST_API_BASE_URL", 
"http://127.0.0.1:4010") diff --git a/tests/api_resources/gpu_droplets/test_volumes.py b/tests/api_resources/gpu_droplets/test_volumes.py index 64bcb4c5..baf6b430 100644 --- a/tests/api_resources/gpu_droplets/test_volumes.py +++ b/tests/api_resources/gpu_droplets/test_volumes.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets import ( VolumeListResponse, VolumeCreateResponse, VolumeRetrieveResponse, diff --git a/tests/api_resources/gpu_droplets/volumes/test_actions.py b/tests/api_resources/gpu_droplets/volumes/test_actions.py index d5338c97..40d9b4eb 100644 --- a/tests/api_resources/gpu_droplets/volumes/test_actions.py +++ b/tests/api_resources/gpu_droplets/volumes/test_actions.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets.volumes import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets.volumes import ( ActionListResponse, ActionRetrieveResponse, ActionInitiateByIDResponse, diff --git a/tests/api_resources/gpu_droplets/volumes/test_snapshots.py b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py index 8b72305c..4884d372 100644 --- a/tests/api_resources/gpu_droplets/volumes/test_snapshots.py +++ b/tests/api_resources/gpu_droplets/volumes/test_snapshots.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.gpu_droplets.volumes import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.gpu_droplets.volumes import ( SnapshotListResponse, SnapshotCreateResponse, SnapshotRetrieveResponse, diff --git a/tests/api_resources/inference/test_api_keys.py 
b/tests/api_resources/inference/test_api_keys.py index 157a2e3d..85ad49da 100644 --- a/tests/api_resources/inference/test_api_keys.py +++ b/tests/api_resources/inference/test_api_keys.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.inference import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.inference import ( APIKeyListResponse, APIKeyCreateResponse, APIKeyDeleteResponse, diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py index 55b056b8..ebb0841a 100644 --- a/tests/api_resources/knowledge_bases/test_data_sources.py +++ b/tests/api_resources/knowledge_bases/test_data_sources.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.knowledge_bases import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.knowledge_bases import ( DataSourceListResponse, DataSourceCreateResponse, DataSourceDeleteResponse, diff --git a/tests/api_resources/knowledge_bases/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py index ed32d7f8..b0185941 100644 --- a/tests/api_resources/knowledge_bases/test_indexing_jobs.py +++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.knowledge_bases import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.knowledge_bases import ( IndexingJobListResponse, IndexingJobCreateResponse, IndexingJobRetrieveResponse, diff --git a/tests/api_resources/models/providers/test_anthropic.py b/tests/api_resources/models/providers/test_anthropic.py index c61a97ea..6b3d99a3 100644 --- 
a/tests/api_resources/models/providers/test_anthropic.py +++ b/tests/api_resources/models/providers/test_anthropic.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.models.providers import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.models.providers import ( AnthropicListResponse, AnthropicCreateResponse, AnthropicDeleteResponse, diff --git a/tests/api_resources/models/providers/test_openai.py b/tests/api_resources/models/providers/test_openai.py index 7fde1a69..bdde97ca 100644 --- a/tests/api_resources/models/providers/test_openai.py +++ b/tests/api_resources/models/providers/test_openai.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types.models.providers import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types.models.providers import ( OpenAIListResponse, OpenAICreateResponse, OpenAIDeleteResponse, diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py index 8a6a7d69..2f68a06f 100644 --- a/tests/api_resources/test_agents.py +++ b/tests/api_resources/test_agents.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types import ( AgentListResponse, AgentCreateResponse, AgentDeleteResponse, diff --git a/tests/api_resources/test_gpu_droplets.py b/tests/api_resources/test_gpu_droplets.py index 22f3d2d0..cbc7e63b 100644 --- a/tests/api_resources/test_gpu_droplets.py +++ b/tests/api_resources/test_gpu_droplets.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ( +from do_gradientai 
import GradientAI, AsyncGradientAI +from do_gradientai.types import ( GPUDropletListResponse, GPUDropletCreateResponse, GPUDropletRetrieveResponse, diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py index 8a331b52..c4d179cc 100644 --- a/tests/api_resources/test_knowledge_bases.py +++ b/tests/api_resources/test_knowledge_bases.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ( +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types import ( KnowledgeBaseListResponse, KnowledgeBaseCreateResponse, KnowledgeBaseDeleteResponse, diff --git a/tests/api_resources/test_models.py b/tests/api_resources/test_models.py index fe837973..803c5d5a 100644 --- a/tests/api_resources/test_models.py +++ b/tests/api_resources/test_models.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import ModelListResponse, ModelRetrieveResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types import ModelListResponse, ModelRetrieveResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_regions.py b/tests/api_resources/test_regions.py index 4f232293..f331342e 100644 --- a/tests/api_resources/test_regions.py +++ b/tests/api_resources/test_regions.py @@ -7,9 +7,9 @@ import pytest -from gradientai import GradientAI, AsyncGradientAI from tests.utils import assert_matches_type -from gradientai.types import RegionListResponse +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai.types import RegionListResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/conftest.py b/tests/conftest.py index a5fb13ce..d61eb8b7 100644 --- a/tests/conftest.py +++ 
b/tests/conftest.py @@ -10,15 +10,15 @@ import pytest from pytest_asyncio import is_async_test -from gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient -from gradientai._utils import is_dict +from do_gradientai import GradientAI, AsyncGradientAI, DefaultAioHttpClient +from do_gradientai._utils import is_dict if TYPE_CHECKING: from _pytest.fixtures import FixtureRequest # pyright: ignore[reportPrivateImportUsage] pytest.register_assert_rewrite("tests.utils") -logging.getLogger("gradientai").setLevel(logging.DEBUG) +logging.getLogger("do_gradientai").setLevel(logging.DEBUG) # automatically add `pytest.mark.asyncio()` to all of our async tests diff --git a/tests/test_client.py b/tests/test_client.py index a9d8d9b7..e88c4544 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -21,12 +21,12 @@ from respx import MockRouter from pydantic import ValidationError -from gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError -from gradientai._types import Omit -from gradientai._models import BaseModel, FinalRequestOptions -from gradientai._streaming import Stream, AsyncStream -from gradientai._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError -from gradientai._base_client import ( +from do_gradientai import GradientAI, AsyncGradientAI, APIResponseValidationError +from do_gradientai._types import Omit +from do_gradientai._models import BaseModel, FinalRequestOptions +from do_gradientai._streaming import Stream, AsyncStream +from do_gradientai._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError +from do_gradientai._base_client import ( DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, @@ -259,10 +259,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic # to_raw_response_wrapper leaks through the @functools.wraps() decorator. # # removing the decorator fixes the leak for reasons we don't understand. 
- "gradientai/_legacy_response.py", - "gradientai/_response.py", + "do_gradientai/_legacy_response.py", + "do_gradientai/_response.py", # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. - "gradientai/_compat.py", + "do_gradientai/_compat.py", # Standard library leaks we don't care about. "/logging/__init__.py", ] @@ -873,7 +873,7 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) @@ -891,7 +891,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien assert _get_open_connections(self.client) == 0 - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: GradientAI) -> None: respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) @@ -909,7 +909,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + 
@mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.parametrize("failure_mode", ["status", "exception"]) def test_retries_taken( @@ -948,7 +948,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_omit_retry_count_header( self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter @@ -980,7 +980,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_overwrite_retry_count_header( self, client: GradientAI, failures_before_success: int, respx_mock: MockRouter @@ -1261,10 +1261,10 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic # to_raw_response_wrapper leaks through the @functools.wraps() decorator. # # removing the decorator fixes the leak for reasons we don't understand. - "gradientai/_legacy_response.py", - "gradientai/_response.py", + "do_gradientai/_legacy_response.py", + "do_gradientai/_response.py", # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. - "gradientai/_compat.py", + "do_gradientai/_compat.py", # Standard library leaks we don't care about. 
"/logging/__init__.py", ] @@ -1880,7 +1880,7 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_timeout_errors_doesnt_leak( self, respx_mock: MockRouter, async_client: AsyncGradientAI @@ -1900,7 +1900,7 @@ async def test_retrying_timeout_errors_doesnt_leak( assert _get_open_connections(self.client) == 0 - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_status_errors_doesnt_leak( self, respx_mock: MockRouter, async_client: AsyncGradientAI @@ -1920,7 +1920,7 @@ async def test_retrying_status_errors_doesnt_leak( assert _get_open_connections(self.client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio @pytest.mark.parametrize("failure_mode", ["status", "exception"]) @@ -1960,7 +1960,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + 
@mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_omit_retry_count_header( @@ -1993,7 +1993,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response: assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) - @mock.patch("gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) + @mock.patch("do_gradientai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_overwrite_retry_count_header( @@ -2036,8 +2036,8 @@ def test_get_platform(self) -> None: import nest_asyncio import threading - from gradientai._utils import asyncify - from gradientai._base_client import get_platform + from do_gradientai._utils import asyncify + from do_gradientai._base_client import get_platform async def test_main() -> None: result = await asyncify(get_platform)() diff --git a/tests/test_deepcopy.py b/tests/test_deepcopy.py index 9d1579a8..5a98ce1b 100644 --- a/tests/test_deepcopy.py +++ b/tests/test_deepcopy.py @@ -1,4 +1,4 @@ -from gradientai._utils import deepcopy_minimal +from do_gradientai._utils import deepcopy_minimal def assert_different_identities(obj1: object, obj2: object) -> None: diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py index 2905d59c..341e65ae 100644 --- a/tests/test_extract_files.py +++ b/tests/test_extract_files.py @@ -4,8 +4,8 @@ import pytest -from gradientai._types import FileTypes -from gradientai._utils import extract_files +from do_gradientai._types import FileTypes +from do_gradientai._utils import extract_files def test_removes_files_from_input() -> None: diff --git a/tests/test_files.py b/tests/test_files.py index 4a723313..ff7914bb 100644 --- a/tests/test_files.py +++ b/tests/test_files.py @@ -4,7 +4,7 @@ 
import pytest from dirty_equals import IsDict, IsList, IsBytes, IsTuple -from gradientai._files import to_httpx_files, async_to_httpx_files +from do_gradientai._files import to_httpx_files, async_to_httpx_files readme_path = Path(__file__).parent.parent.joinpath("README.md") diff --git a/tests/test_models.py b/tests/test_models.py index 3a857584..bfbef61a 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -7,9 +7,9 @@ import pydantic from pydantic import Field -from gradientai._utils import PropertyInfo -from gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json -from gradientai._models import BaseModel, construct_type +from do_gradientai._utils import PropertyInfo +from do_gradientai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json +from do_gradientai._models import BaseModel, construct_type class BasicModel(BaseModel): diff --git a/tests/test_qs.py b/tests/test_qs.py index 9080377b..c9213571 100644 --- a/tests/test_qs.py +++ b/tests/test_qs.py @@ -4,7 +4,7 @@ import pytest -from gradientai._qs import Querystring, stringify +from do_gradientai._qs import Querystring, stringify def test_empty() -> None: diff --git a/tests/test_required_args.py b/tests/test_required_args.py index c4e6b9d8..434e9491 100644 --- a/tests/test_required_args.py +++ b/tests/test_required_args.py @@ -2,7 +2,7 @@ import pytest -from gradientai._utils import required_args +from do_gradientai._utils import required_args def test_too_many_positional_params() -> None: diff --git a/tests/test_response.py b/tests/test_response.py index 1a8f241e..001ce776 100644 --- a/tests/test_response.py +++ b/tests/test_response.py @@ -6,8 +6,8 @@ import pytest import pydantic -from gradientai import BaseModel, GradientAI, AsyncGradientAI -from gradientai._response import ( +from do_gradientai import BaseModel, GradientAI, AsyncGradientAI +from do_gradientai._response import ( APIResponse, BaseAPIResponse, AsyncAPIResponse, @@ -15,8 +15,8 @@ AsyncBinaryAPIResponse, 
extract_response_type, ) -from gradientai._streaming import Stream -from gradientai._base_client import FinalRequestOptions +from do_gradientai._streaming import Stream +from do_gradientai._base_client import FinalRequestOptions class ConcreteBaseAPIResponse(APIResponse[bytes]): ... @@ -37,7 +37,7 @@ def test_extract_response_type_direct_classes() -> None: def test_extract_response_type_direct_class_missing_type_arg() -> None: with pytest.raises( RuntimeError, - match="Expected type to have a type argument at index 0 but it did not", + match="Expected type to have a type argument at index 0 but it did not", ): extract_response_type(AsyncAPIResponse) @@ -68,7 +68,7 @@ def test_response_parse_mismatched_basemodel(client: GradientAI) -> None: with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. `from do_gradientai import BaseModel`", ): response.parse(to=PydanticModel) @@ -86,7 +86,7 @@ async def test_async_response_parse_mismatched_basemodel(async_client: AsyncGrad with pytest.raises( TypeError, - match="Pydantic models must subclass our base model type, e.g. `from gradientai import BaseModel`", + match="Pydantic models must subclass our base model type, e.g. 
`from do_gradientai import BaseModel`", ): await response.parse(to=PydanticModel) diff --git a/tests/test_streaming.py b/tests/test_streaming.py index cdb41a77..c1ce8e85 100644 --- a/tests/test_streaming.py +++ b/tests/test_streaming.py @@ -5,8 +5,8 @@ import httpx import pytest -from gradientai import GradientAI, AsyncGradientAI -from gradientai._streaming import Stream, AsyncStream, ServerSentEvent +from do_gradientai import GradientAI, AsyncGradientAI +from do_gradientai._streaming import Stream, AsyncStream, ServerSentEvent @pytest.mark.asyncio diff --git a/tests/test_transform.py b/tests/test_transform.py index 825fe048..30c06d6a 100644 --- a/tests/test_transform.py +++ b/tests/test_transform.py @@ -8,15 +8,15 @@ import pytest -from gradientai._types import NOT_GIVEN, Base64FileInput -from gradientai._utils import ( +from do_gradientai._types import NOT_GIVEN, Base64FileInput +from do_gradientai._utils import ( PropertyInfo, transform as _transform, parse_datetime, async_transform as _async_transform, ) -from gradientai._compat import PYDANTIC_V2 -from gradientai._models import BaseModel +from do_gradientai._compat import PYDANTIC_V2 +from do_gradientai._models import BaseModel _T = TypeVar("_T") diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py index 3856b2c9..9ce2e0d3 100644 --- a/tests/test_utils/test_proxy.py +++ b/tests/test_utils/test_proxy.py @@ -2,7 +2,7 @@ from typing import Any from typing_extensions import override -from gradientai._utils import LazyProxy +from do_gradientai._utils import LazyProxy class RecursiveLazyProxy(LazyProxy[Any]): diff --git a/tests/test_utils/test_typing.py b/tests/test_utils/test_typing.py index 66ad064f..c9129fdc 100644 --- a/tests/test_utils/test_typing.py +++ b/tests/test_utils/test_typing.py @@ -2,7 +2,7 @@ from typing import Generic, TypeVar, cast -from gradientai._utils import extract_type_var_from_base +from do_gradientai._utils import extract_type_var_from_base _T = TypeVar("_T") _T2 = 
TypeVar("_T2") diff --git a/tests/utils.py b/tests/utils.py index b539ed2c..9def7c60 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -8,8 +8,8 @@ from datetime import date, datetime from typing_extensions import Literal, get_args, get_origin, assert_type -from gradientai._types import Omit, NoneType -from gradientai._utils import ( +from do_gradientai._types import Omit, NoneType +from do_gradientai._utils import ( is_dict, is_list, is_list_type, @@ -18,8 +18,8 @@ is_annotated_type, is_type_alias_type, ) -from gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields -from gradientai._models import BaseModel +from do_gradientai._compat import PYDANTIC_V2, field_outer_type, get_model_fields +from do_gradientai._models import BaseModel BaseModelT = TypeVar("BaseModelT", bound=BaseModel) From d11e78152a253b3b8fa6b8a5ae56714633886292 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 18 Jul 2025 15:29:30 +0000 Subject: [PATCH 19/19] release: 0.1.0-alpha.15 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 27 +++++++++++++++++++++++++++ pyproject.toml | 2 +- src/do_gradientai/_version.py | 2 +- 4 files changed, 30 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b0699969..08e82c45 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.14" + ".": "0.1.0-alpha.15" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 33b1a566..f37f144c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,32 @@ # Changelog +## 0.1.0-alpha.15 (2025-07-18) + +Full Changelog: [v0.1.0-alpha.14...v0.1.0-alpha.15](https://github.com/digitalocean/gradientai-python/compare/v0.1.0-alpha.14...v0.1.0-alpha.15) + +### Features + +* **api:** add gpu droplets 
([b207e9a](https://github.com/digitalocean/gradientai-python/commit/b207e9a69ddf821522f5d9e9f10502850220585f)) +* **api:** add gpu droplets ([b9e317b](https://github.com/digitalocean/gradientai-python/commit/b9e317bac2c541a7eafcfb59a4b19c81e1145075)) + + +### Chores + +* format ([d940e66](https://github.com/digitalocean/gradientai-python/commit/d940e66107e00f351853c0bc667ca6ed3cf98605)) +* **internal:** version bump ([1a66126](https://github.com/digitalocean/gradientai-python/commit/1a661264f68580dff74c3f7d4891ab2661fde190)) +* **internal:** version bump ([9c546a1](https://github.com/digitalocean/gradientai-python/commit/9c546a1f97241bb448430e1e43f4e20589e243c1)) +* **internal:** version bump ([8814098](https://github.com/digitalocean/gradientai-python/commit/881409847161671b798baf2c89f37ae29e195f29)) +* **internal:** version bump ([bb3ad60](https://github.com/digitalocean/gradientai-python/commit/bb3ad60d02fe01b937eaced64682fd66d95a9aec)) +* **internal:** version bump ([2022024](https://github.com/digitalocean/gradientai-python/commit/20220246634accf95c4a53df200db5ace7107c55)) +* **internal:** version bump ([52e2c23](https://github.com/digitalocean/gradientai-python/commit/52e2c23c23d4dc27c176ebf4783c8fbd86a4c07b)) +* **internal:** version bump ([8ac0f2a](https://github.com/digitalocean/gradientai-python/commit/8ac0f2a6d4862907243ba78b132373289e2c3543)) +* **internal:** version bump ([d83fe97](https://github.com/digitalocean/gradientai-python/commit/d83fe97aa2f77c84c3c7f4bf40b9fb94c5c28aca)) +* **internal:** version bump ([9d20399](https://github.com/digitalocean/gradientai-python/commit/9d2039919e1d9c9e6d153edfb03bccff18b56686)) +* **internal:** version bump ([44a045a](https://github.com/digitalocean/gradientai-python/commit/44a045a9c0ce0f0769cce66bc7421a9d81cbc645)) +* **internal:** version bump ([95d1dd2](https://github.com/digitalocean/gradientai-python/commit/95d1dd24d290d7d5f23328e4c45c439dca5df748)) +* **internal:** version bump 
([7416147](https://github.com/digitalocean/gradientai-python/commit/74161477f98e3a76b7227b07d942e1f26a4612b3)) +* **internal:** version bump ([06d7f19](https://github.com/digitalocean/gradientai-python/commit/06d7f19cd42a6bc578b39709fe6efed8741a24bc)) + ## 0.1.0-alpha.14 (2025-07-17) Full Changelog: [v0.1.0-alpha.13...v0.1.0-alpha.14](https://github.com/digitalocean/gradientai-python/compare/v0.1.0-alpha.13...v0.1.0-alpha.14) diff --git a/pyproject.toml b/pyproject.toml index f5e5770a..23570f42 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "c63a5cfe-b235-4fbe-8bbb-82a9e02a482a-python" -version = "0.1.0-alpha.14" +version = "0.1.0-alpha.15" description = "The official Python library for GradientAI" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/do_gradientai/_version.py b/src/do_gradientai/_version.py index d69cef74..d0c1c939 100644 --- a/src/do_gradientai/_version.py +++ b/src/do_gradientai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "do_gradientai" -__version__ = "0.1.0-alpha.14" # x-release-please-version +__version__ = "0.1.0-alpha.15" # x-release-please-version