From 86b6cd3201853a0a384d5c4fb12c584063fbf1ea Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 24 Apr 2024 17:23:35 -0700 Subject: [PATCH] Copybara import of the project: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit -- b0a624659adf506e6929282357a374a60f2cfc05 by Owl Bot : feat: add model_monitor resource and APIs to public v1beta1 client library PiperOrigin-RevId: 624288685 Source-Link: https://github.com/googleapis/googleapis/commit/b5d0197dce43a7c81a38573b5c1efc8da14a27df Source-Link: https://github.com/googleapis/googleapis-gen/commit/aaff3b3875699456f5a48ce557843237d0d93ccf Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYWFmZjNiMzg3NTY5OTQ1NmY1YTQ4Y2U1NTc4NDMyMzdkMGQ5M2NjZiJ9 -- dec29484add2a8e6c77700ebe7b7a349264e297a by Owl Bot : 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md -- 225a74b57242947bbc53aa025b73ca74a4a159cb by Owl Bot : chore: Update gapic-generator-python to v1.17.0 PiperOrigin-RevId: 626992299 Source-Link: https://github.com/googleapis/googleapis/commit/e495ff587351369637ecee17bfd260d2e76a41f7 Source-Link: https://github.com/googleapis/googleapis-gen/commit/2463c3c27110a92d1fab175109ef94bfe5967168 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMjQ2M2MzYzI3MTEwYTkyZDFmYWIxNzUxMDllZjk0YmZlNTk2NzE2OCJ9 -- 74fce4a40f6fced7016727cb7cc3b47f14b60641 by Owl Bot : 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md -- 42980b98f64bb7cb0936dee5c8643e402c491546 by Owl Bot : chore: Update gapic-generator-python to v1.17.0 PiperOrigin-RevId: 627075268 Source-Link: https://github.com/googleapis/googleapis/commit/b0a5b9d2b7021525100441756e3914ed3d616cb6 Source-Link: https://github.com/googleapis/googleapis-gen/commit/56b44dca0ceea3ad2afe9ce4a9aeadf9bdf1b445 Copy-Tag: 
eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNTZiNDRkY2EwY2VlYTNhZDJhZmU5Y2U0YTlhZWFkZjliZGYxYjQ0NSJ9 -- 0a2bf4011d8a01dddee50c95de06bc6ca46f3bcb by Owl Bot : fix!: delete the deprecated field for model monitor PiperOrigin-RevId: 627075504 Source-Link: https://github.com/googleapis/googleapis/commit/5fa62a929507a36c969aedafa732a5b8ef9a01ca Source-Link: https://github.com/googleapis/googleapis-gen/commit/a557727a7cdc36b0982b7a15099ac2097e0265ae Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYTU1NzcyN2E3Y2RjMzZiMDk4MmI3YTE1MDk5YWMyMDk3ZTAyNjVhZSJ9 -- 54b304d6a0c4863802b956700958d104b4b16ccf by Owl Bot : 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md COPYBARA_INTEGRATE_REVIEW=https://github.com/googleapis/python-aiplatform/pull/3610 from googleapis:owl-bot-copy ac88c1b1363d0e7e67c0c63baca41fd478c22535 PiperOrigin-RevId: 627899282 --- .../model_monitoring_service.rst | 10 + docs/aiplatform_v1beta1/services_.rst | 1 + google/cloud/aiplatform_v1beta1/__init__.py | 70 + .../aiplatform_v1beta1/gapic_metadata.json | 184 + .../services/migration_service/client.py | 18 +- .../model_monitoring_service/__init__.py | 22 + .../model_monitoring_service/async_client.py | 2392 +++ .../model_monitoring_service/client.py | 2949 ++++ .../model_monitoring_service/pagers.py | 577 + .../transports/__init__.py | 40 + .../transports/base.py | 426 + .../transports/grpc.py | 795 + .../transports/grpc_asyncio.py | 800 + .../transports/rest.py | 6487 ++++++++ .../services/schedule_service/async_client.py | 21 + .../services/schedule_service/client.py | 113 + .../aiplatform_v1beta1/types/__init__.py | 78 + .../aiplatform_v1beta1/types/model_monitor.py | 295 + .../types/model_monitoring_alert.py | 170 + .../types/model_monitoring_job.py | 193 + .../types/model_monitoring_service.py | 573 + .../types/model_monitoring_spec.py | 600 + .../types/model_monitoring_stats.py | 270 + 
.../aiplatform_v1beta1/types/schedule.py | 16 + ...ring_service_create_model_monitor_async.py | 56 + ...oring_service_create_model_monitor_sync.py | 56 + ...rvice_create_model_monitoring_job_async.py | 52 + ...ervice_create_model_monitoring_job_sync.py | 52 + ...ring_service_delete_model_monitor_async.py | 56 + ...oring_service_delete_model_monitor_sync.py | 56 + ...rvice_delete_model_monitoring_job_async.py | 56 + ...ervice_delete_model_monitoring_job_sync.py | 56 + ...itoring_service_get_model_monitor_async.py | 52 + ...nitoring_service_get_model_monitor_sync.py | 52 + ..._service_get_model_monitoring_job_async.py | 52 + ...g_service_get_model_monitoring_job_sync.py | 52 + ...ervice_list_model_monitoring_jobs_async.py | 53 + ...service_list_model_monitoring_jobs_sync.py | 53 + ...oring_service_list_model_monitors_async.py | 53 + ...toring_service_list_model_monitors_sync.py | 53 + ...ce_search_model_monitoring_alerts_async.py | 53 + ...ice_search_model_monitoring_alerts_sync.py | 53 + ...ice_search_model_monitoring_stats_async.py | 53 + ...vice_search_model_monitoring_stats_sync.py | 53 + ...ring_service_update_model_monitor_async.py | 55 + ...oring_service_update_model_monitor_sync.py | 55 + ...adata_google.cloud.aiplatform.v1beta1.json | 1795 +++ .../test_migration_service.py | 26 +- .../test_model_monitoring_service.py | 12620 ++++++++++++++++ .../test_schedule_service.py | 532 +- 50 files changed, 33185 insertions(+), 70 deletions(-) create mode 100644 docs/aiplatform_v1beta1/model_monitoring_service.rst create mode 100644 google/cloud/aiplatform_v1beta1/services/model_monitoring_service/__init__.py create mode 100644 google/cloud/aiplatform_v1beta1/services/model_monitoring_service/async_client.py create mode 100644 google/cloud/aiplatform_v1beta1/services/model_monitoring_service/client.py create mode 100644 google/cloud/aiplatform_v1beta1/services/model_monitoring_service/pagers.py create mode 100644 
google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/__init__.py create mode 100644 google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/base.py create mode 100644 google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/grpc.py create mode 100644 google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/grpc_asyncio.py create mode 100644 google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest.py create mode 100644 google/cloud/aiplatform_v1beta1/types/model_monitor.py create mode 100644 google/cloud/aiplatform_v1beta1/types/model_monitoring_alert.py create mode 100644 google/cloud/aiplatform_v1beta1/types/model_monitoring_job.py create mode 100644 google/cloud/aiplatform_v1beta1/types/model_monitoring_service.py create mode 100644 google/cloud/aiplatform_v1beta1/types/model_monitoring_spec.py create mode 100644 google/cloud/aiplatform_v1beta1/types/model_monitoring_stats.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitor_async.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitor_sync.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitoring_job_async.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitoring_job_sync.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitor_async.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitor_sync.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitoring_job_async.py create mode 100644 
samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitoring_job_sync.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitor_async.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitor_sync.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitoring_job_async.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitoring_job_sync.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitoring_jobs_async.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitoring_jobs_sync.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitors_async.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitors_sync.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_alerts_async.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_alerts_sync.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_stats_async.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_stats_sync.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_update_model_monitor_async.py create mode 100644 samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_update_model_monitor_sync.py create mode 100644 
tests/unit/gapic/aiplatform_v1beta1/test_model_monitoring_service.py diff --git a/docs/aiplatform_v1beta1/model_monitoring_service.rst b/docs/aiplatform_v1beta1/model_monitoring_service.rst new file mode 100644 index 0000000000..7516ff3bbe --- /dev/null +++ b/docs/aiplatform_v1beta1/model_monitoring_service.rst @@ -0,0 +1,10 @@ +ModelMonitoringService +---------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.model_monitoring_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.model_monitoring_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1beta1/services_.rst b/docs/aiplatform_v1beta1/services_.rst index bad04de935..693f944014 100644 --- a/docs/aiplatform_v1beta1/services_.rst +++ b/docs/aiplatform_v1beta1/services_.rst @@ -22,6 +22,7 @@ Services for Google Cloud Aiplatform v1beta1 API metadata_service migration_service model_garden_service + model_monitoring_service model_service notebook_service persistent_resource_service diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index 83996e56bb..784c2ff027 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -68,6 +68,8 @@ from .services.migration_service import MigrationServiceAsyncClient from .services.model_garden_service import ModelGardenServiceClient from .services.model_garden_service import ModelGardenServiceAsyncClient +from .services.model_monitoring_service import ModelMonitoringServiceClient +from .services.model_monitoring_service import ModelMonitoringServiceAsyncClient from .services.model_service import ModelServiceClient from .services.model_service import ModelServiceAsyncClient from .services.notebook_service import NotebookServiceClient @@ -650,11 +652,44 @@ from .types.model_garden_service import ListPublisherModelsRequest from .types.model_garden_service import 
ListPublisherModelsResponse from .types.model_garden_service import PublisherModelView +from .types.model_monitor import ModelMonitor +from .types.model_monitor import ModelMonitoringSchema from .types.model_monitoring import ModelMonitoringAlertConfig from .types.model_monitoring import ModelMonitoringConfig from .types.model_monitoring import ModelMonitoringObjectiveConfig from .types.model_monitoring import SamplingStrategy from .types.model_monitoring import ThresholdConfig +from .types.model_monitoring_alert import ModelMonitoringAlert +from .types.model_monitoring_alert import ModelMonitoringAlertCondition +from .types.model_monitoring_alert import ModelMonitoringAnomaly +from .types.model_monitoring_job import ModelMonitoringJob +from .types.model_monitoring_job import ModelMonitoringJobExecutionDetail +from .types.model_monitoring_service import CreateModelMonitoringJobRequest +from .types.model_monitoring_service import CreateModelMonitorOperationMetadata +from .types.model_monitoring_service import CreateModelMonitorRequest +from .types.model_monitoring_service import DeleteModelMonitoringJobRequest +from .types.model_monitoring_service import DeleteModelMonitorRequest +from .types.model_monitoring_service import GetModelMonitoringJobRequest +from .types.model_monitoring_service import GetModelMonitorRequest +from .types.model_monitoring_service import ListModelMonitoringJobsRequest +from .types.model_monitoring_service import ListModelMonitoringJobsResponse +from .types.model_monitoring_service import ListModelMonitorsRequest +from .types.model_monitoring_service import ListModelMonitorsResponse +from .types.model_monitoring_service import SearchModelMonitoringAlertsRequest +from .types.model_monitoring_service import SearchModelMonitoringAlertsResponse +from .types.model_monitoring_service import SearchModelMonitoringStatsRequest +from .types.model_monitoring_service import SearchModelMonitoringStatsResponse +from .types.model_monitoring_service import 
UpdateModelMonitorOperationMetadata +from .types.model_monitoring_service import UpdateModelMonitorRequest +from .types.model_monitoring_spec import ModelMonitoringInput +from .types.model_monitoring_spec import ModelMonitoringNotificationSpec +from .types.model_monitoring_spec import ModelMonitoringObjectiveSpec +from .types.model_monitoring_spec import ModelMonitoringOutputSpec +from .types.model_monitoring_spec import ModelMonitoringSpec +from .types.model_monitoring_stats import ModelMonitoringStats +from .types.model_monitoring_stats import ModelMonitoringStatsDataPoint +from .types.model_monitoring_stats import ModelMonitoringTabularStats +from .types.model_monitoring_stats import SearchModelMonitoringStatsFilter from .types.model_service import BatchImportEvaluatedAnnotationsRequest from .types.model_service import BatchImportEvaluatedAnnotationsResponse from .types.model_service import BatchImportModelEvaluationSlicesRequest @@ -971,6 +1006,7 @@ "MetadataServiceAsyncClient", "MigrationServiceAsyncClient", "ModelGardenServiceAsyncClient", + "ModelMonitoringServiceAsyncClient", "ModelServiceAsyncClient", "NotebookServiceAsyncClient", "PersistentResourceServiceAsyncClient", @@ -1106,6 +1142,9 @@ "CreateMetadataStoreOperationMetadata", "CreateMetadataStoreRequest", "CreateModelDeploymentMonitoringJobRequest", + "CreateModelMonitorOperationMetadata", + "CreateModelMonitorRequest", + "CreateModelMonitoringJobRequest", "CreateNasJobRequest", "CreateNotebookRuntimeTemplateOperationMetadata", "CreateNotebookRuntimeTemplateRequest", @@ -1165,6 +1204,8 @@ "DeleteMetadataStoreOperationMetadata", "DeleteMetadataStoreRequest", "DeleteModelDeploymentMonitoringJobRequest", + "DeleteModelMonitorRequest", + "DeleteModelMonitoringJobRequest", "DeleteModelRequest", "DeleteModelVersionRequest", "DeleteNasJobRequest", @@ -1331,6 +1372,8 @@ "GetModelDeploymentMonitoringJobRequest", "GetModelEvaluationRequest", "GetModelEvaluationSliceRequest", + "GetModelMonitorRequest", + 
"GetModelMonitoringJobRequest", "GetModelRequest", "GetNasJobRequest", "GetNasTrialDetailRequest", @@ -1447,6 +1490,10 @@ "ListModelEvaluationSlicesResponse", "ListModelEvaluationsRequest", "ListModelEvaluationsResponse", + "ListModelMonitoringJobsRequest", + "ListModelMonitoringJobsResponse", + "ListModelMonitorsRequest", + "ListModelMonitorsResponse", "ListModelVersionsRequest", "ListModelVersionsResponse", "ListModelsRequest", @@ -1519,10 +1566,26 @@ "ModelExplanation", "ModelGardenServiceClient", "ModelGardenSource", + "ModelMonitor", + "ModelMonitoringAlert", + "ModelMonitoringAlertCondition", "ModelMonitoringAlertConfig", + "ModelMonitoringAnomaly", "ModelMonitoringConfig", + "ModelMonitoringInput", + "ModelMonitoringJob", + "ModelMonitoringJobExecutionDetail", + "ModelMonitoringNotificationSpec", "ModelMonitoringObjectiveConfig", + "ModelMonitoringObjectiveSpec", + "ModelMonitoringOutputSpec", + "ModelMonitoringSchema", + "ModelMonitoringServiceClient", + "ModelMonitoringSpec", + "ModelMonitoringStats", "ModelMonitoringStatsAnomalies", + "ModelMonitoringStatsDataPoint", + "ModelMonitoringTabularStats", "ModelServiceClient", "ModelSourceInfo", "MutateDeployedIndexOperationMetadata", @@ -1691,6 +1754,11 @@ "SearchMigratableResourcesResponse", "SearchModelDeploymentMonitoringStatsAnomaliesRequest", "SearchModelDeploymentMonitoringStatsAnomaliesResponse", + "SearchModelMonitoringAlertsRequest", + "SearchModelMonitoringAlertsResponse", + "SearchModelMonitoringStatsFilter", + "SearchModelMonitoringStatsRequest", + "SearchModelMonitoringStatsResponse", "SearchNearestEntitiesRequest", "SearchNearestEntitiesResponse", "Segment", @@ -1812,6 +1880,8 @@ "UpdateIndexRequest", "UpdateModelDeploymentMonitoringJobOperationMetadata", "UpdateModelDeploymentMonitoringJobRequest", + "UpdateModelMonitorOperationMetadata", + "UpdateModelMonitorRequest", "UpdateModelRequest", "UpdatePersistentResourceOperationMetadata", "UpdatePersistentResourceRequest", diff --git 
a/google/cloud/aiplatform_v1beta1/gapic_metadata.json b/google/cloud/aiplatform_v1beta1/gapic_metadata.json index a659d266cc..6ebc20fc5d 100644 --- a/google/cloud/aiplatform_v1beta1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1beta1/gapic_metadata.json @@ -3036,6 +3036,190 @@ } } }, + "ModelMonitoringService": { + "clients": { + "grpc": { + "libraryClient": "ModelMonitoringServiceClient", + "rpcs": { + "CreateModelMonitor": { + "methods": [ + "create_model_monitor" + ] + }, + "CreateModelMonitoringJob": { + "methods": [ + "create_model_monitoring_job" + ] + }, + "DeleteModelMonitor": { + "methods": [ + "delete_model_monitor" + ] + }, + "DeleteModelMonitoringJob": { + "methods": [ + "delete_model_monitoring_job" + ] + }, + "GetModelMonitor": { + "methods": [ + "get_model_monitor" + ] + }, + "GetModelMonitoringJob": { + "methods": [ + "get_model_monitoring_job" + ] + }, + "ListModelMonitoringJobs": { + "methods": [ + "list_model_monitoring_jobs" + ] + }, + "ListModelMonitors": { + "methods": [ + "list_model_monitors" + ] + }, + "SearchModelMonitoringAlerts": { + "methods": [ + "search_model_monitoring_alerts" + ] + }, + "SearchModelMonitoringStats": { + "methods": [ + "search_model_monitoring_stats" + ] + }, + "UpdateModelMonitor": { + "methods": [ + "update_model_monitor" + ] + } + } + }, + "grpc-async": { + "libraryClient": "ModelMonitoringServiceAsyncClient", + "rpcs": { + "CreateModelMonitor": { + "methods": [ + "create_model_monitor" + ] + }, + "CreateModelMonitoringJob": { + "methods": [ + "create_model_monitoring_job" + ] + }, + "DeleteModelMonitor": { + "methods": [ + "delete_model_monitor" + ] + }, + "DeleteModelMonitoringJob": { + "methods": [ + "delete_model_monitoring_job" + ] + }, + "GetModelMonitor": { + "methods": [ + "get_model_monitor" + ] + }, + "GetModelMonitoringJob": { + "methods": [ + "get_model_monitoring_job" + ] + }, + "ListModelMonitoringJobs": { + "methods": [ + "list_model_monitoring_jobs" + ] + }, + "ListModelMonitors": { + 
"methods": [ + "list_model_monitors" + ] + }, + "SearchModelMonitoringAlerts": { + "methods": [ + "search_model_monitoring_alerts" + ] + }, + "SearchModelMonitoringStats": { + "methods": [ + "search_model_monitoring_stats" + ] + }, + "UpdateModelMonitor": { + "methods": [ + "update_model_monitor" + ] + } + } + }, + "rest": { + "libraryClient": "ModelMonitoringServiceClient", + "rpcs": { + "CreateModelMonitor": { + "methods": [ + "create_model_monitor" + ] + }, + "CreateModelMonitoringJob": { + "methods": [ + "create_model_monitoring_job" + ] + }, + "DeleteModelMonitor": { + "methods": [ + "delete_model_monitor" + ] + }, + "DeleteModelMonitoringJob": { + "methods": [ + "delete_model_monitoring_job" + ] + }, + "GetModelMonitor": { + "methods": [ + "get_model_monitor" + ] + }, + "GetModelMonitoringJob": { + "methods": [ + "get_model_monitoring_job" + ] + }, + "ListModelMonitoringJobs": { + "methods": [ + "list_model_monitoring_jobs" + ] + }, + "ListModelMonitors": { + "methods": [ + "list_model_monitors" + ] + }, + "SearchModelMonitoringAlerts": { + "methods": [ + "search_model_monitoring_alerts" + ] + }, + "SearchModelMonitoringStats": { + "methods": [ + "search_model_monitoring_stats" + ] + }, + "UpdateModelMonitor": { + "methods": [ + "update_model_monitor" + ] + } + } + } + } + }, "ModelService": { "clients": { "grpc": { diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py index e1e1aa128d..299c8b5e65 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py @@ -215,40 +215,40 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path( project: str, - location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( + return 
"projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod def dataset_path( project: str, + location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( + return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/__init__.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/__init__.py new file mode 100644 index 0000000000..3b1a73ddda --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import ModelMonitoringServiceClient +from .async_client import ModelMonitoringServiceAsyncClient + +__all__ = ( + "ModelMonitoringServiceClient", + "ModelMonitoringServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/async_client.py new file mode 100644 index 0000000000..2beca43a6c --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/async_client.py @@ -0,0 +1,2392 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.model_monitoring_service import pagers +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import job_state +from google.cloud.aiplatform_v1beta1.types import model_monitor +from google.cloud.aiplatform_v1beta1.types import model_monitor as gca_model_monitor +from google.cloud.aiplatform_v1beta1.types import model_monitoring_alert +from google.cloud.aiplatform_v1beta1.types import model_monitoring_job +from google.cloud.aiplatform_v1beta1.types import ( + model_monitoring_job as gca_model_monitoring_job, +) +from google.cloud.aiplatform_v1beta1.types import model_monitoring_service +from google.cloud.aiplatform_v1beta1.types import model_monitoring_spec +from google.cloud.aiplatform_v1beta1.types import model_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from 
google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import ModelMonitoringServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import ModelMonitoringServiceGrpcAsyncIOTransport +from .client import ModelMonitoringServiceClient + + +class ModelMonitoringServiceAsyncClient: + """A service for creating and managing Vertex AI Model moitoring. This + includes ``ModelMonitor`` resources, ``ModelMonitoringJob`` + resources. + """ + + _client: ModelMonitoringServiceClient + + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. + DEFAULT_ENDPOINT = ModelMonitoringServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ModelMonitoringServiceClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = ModelMonitoringServiceClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = ModelMonitoringServiceClient._DEFAULT_UNIVERSE + + batch_prediction_job_path = staticmethod( + ModelMonitoringServiceClient.batch_prediction_job_path + ) + parse_batch_prediction_job_path = staticmethod( + ModelMonitoringServiceClient.parse_batch_prediction_job_path + ) + dataset_path = staticmethod(ModelMonitoringServiceClient.dataset_path) + parse_dataset_path = staticmethod(ModelMonitoringServiceClient.parse_dataset_path) + endpoint_path = staticmethod(ModelMonitoringServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(ModelMonitoringServiceClient.parse_endpoint_path) + model_path = staticmethod(ModelMonitoringServiceClient.model_path) + parse_model_path = staticmethod(ModelMonitoringServiceClient.parse_model_path) + model_monitor_path = staticmethod(ModelMonitoringServiceClient.model_monitor_path) + parse_model_monitor_path = 
staticmethod( + ModelMonitoringServiceClient.parse_model_monitor_path + ) + model_monitoring_job_path = staticmethod( + ModelMonitoringServiceClient.model_monitoring_job_path + ) + parse_model_monitoring_job_path = staticmethod( + ModelMonitoringServiceClient.parse_model_monitoring_job_path + ) + schedule_path = staticmethod(ModelMonitoringServiceClient.schedule_path) + parse_schedule_path = staticmethod(ModelMonitoringServiceClient.parse_schedule_path) + common_billing_account_path = staticmethod( + ModelMonitoringServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + ModelMonitoringServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(ModelMonitoringServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + ModelMonitoringServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + ModelMonitoringServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + ModelMonitoringServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(ModelMonitoringServiceClient.common_project_path) + parse_common_project_path = staticmethod( + ModelMonitoringServiceClient.parse_common_project_path + ) + common_location_path = staticmethod( + ModelMonitoringServiceClient.common_location_path + ) + parse_common_location_path = staticmethod( + ModelMonitoringServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelMonitoringServiceAsyncClient: The constructed client. 
+ """ + return ModelMonitoringServiceClient.from_service_account_info.__func__(ModelMonitoringServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelMonitoringServiceAsyncClient: The constructed client. + """ + return ModelMonitoringServiceClient.from_service_account_file.__func__(ModelMonitoringServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. 
Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return ModelMonitoringServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> ModelMonitoringServiceTransport: + """Returns the transport used by the client instance. + + Returns: + ModelMonitoringServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. + """ + return self._client._universe_domain + + get_transport_class = functools.partial( + type(ModelMonitoringServiceClient).get_transport_class, + type(ModelMonitoringServiceClient), + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ModelMonitoringServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the model monitoring service async client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ModelMonitoringServiceTransport]): The + transport to use. 
If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + self._client = ModelMonitoringServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_model_monitor( + self, + request: Optional[ + Union[model_monitoring_service.CreateModelMonitorRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + model_monitor: Optional[gca_model_monitor.ModelMonitor] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a ModelMonitor. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_create_model_monitor(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateModelMonitorRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_model_monitor(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.CreateModelMonitorRequest, dict]]): + The request object. Request message for + [ModelMonitoringService.CreateModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.CreateModelMonitor]. + parent (:class:`str`): + Required. The resource name of the Location to create + the ModelMonitor in. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_monitor (:class:`google.cloud.aiplatform_v1beta1.types.ModelMonitor`): + Required. The ModelMonitor to create. + This corresponds to the ``model_monitor`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ModelMonitor` Vertex AI Model Monitoring Service serves as a central hub for the analysis + and visualization of data quality and performance + related to models. ModelMonitor stands as a top level + resource for overseeing your model monitoring tasks. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model_monitor]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_monitoring_service.CreateModelMonitorRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model_monitor is not None: + request.model_monitor = model_monitor + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_model_monitor, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_model_monitor.ModelMonitor, + metadata_type=model_monitoring_service.CreateModelMonitorOperationMetadata, + ) + + # Done; return the response. + return response + + async def update_model_monitor( + self, + request: Optional[ + Union[model_monitoring_service.UpdateModelMonitorRequest, dict] + ] = None, + *, + model_monitor: Optional[gca_model_monitor.ModelMonitor] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a ModelMonitor. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_update_model_monitor(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateModelMonitorRequest( + ) + + # Make the request + operation = client.update_model_monitor(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.UpdateModelMonitorRequest, dict]]): + The request object. Request message for + [ModelMonitoringService.UpdateModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.UpdateModelMonitor]. + model_monitor (:class:`google.cloud.aiplatform_v1beta1.types.ModelMonitor`): + Required. The model monitoring + configuration which replaces the + resource on the server. + + This corresponds to the ``model_monitor`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Mask specifying which + fields to update. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ModelMonitor` Vertex AI Model Monitoring Service serves as a central hub for the analysis + and visualization of data quality and performance + related to models. ModelMonitor stands as a top level + resource for overseeing your model monitoring tasks. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_monitor, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_monitoring_service.UpdateModelMonitorRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model_monitor is not None: + request.model_monitor = model_monitor + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_model_monitor, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("model_monitor.name", request.model_monitor.name),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_model_monitor.ModelMonitor, + metadata_type=model_monitoring_service.UpdateModelMonitorOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_model_monitor( + self, + request: Optional[ + Union[model_monitoring_service.GetModelMonitorRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_monitor.ModelMonitor: + r"""Gets a ModelMonitor. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_get_model_monitor(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelMonitorRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_monitor(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.GetModelMonitorRequest, dict]]): + The request object. Request message for + [ModelMonitoringService.GetModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.GetModelMonitor]. + name (:class:`str`): + Required. The name of the ModelMonitor resource. 
Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelMonitor: + Vertex AI Model Monitoring Service + serves as a central hub for the analysis + and visualization of data quality and + performance related to models. + ModelMonitor stands as a top level + resource for overseeing your model + monitoring tasks. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_monitoring_service.GetModelMonitorRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model_monitor, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_model_monitors( + self, + request: Optional[ + Union[model_monitoring_service.ListModelMonitorsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelMonitorsAsyncPager: + r"""Lists ModelMonitors in a Location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_list_model_monitors(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelMonitorsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_monitors(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.ListModelMonitorsRequest, dict]]): + The request object. Request message for + [ModelMonitoringService.ListModelMonitors][google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitors]. + parent (:class:`str`): + Required. The resource name of the Location to list the + ModelMonitors from. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_monitoring_service.pagers.ListModelMonitorsAsyncPager: + Response message for + [ModelMonitoringService.ListModelMonitors][google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitors] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_monitoring_service.ListModelMonitorsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_monitors, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelMonitorsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_model_monitor( + self, + request: Optional[ + Union[model_monitoring_service.DeleteModelMonitorRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a ModelMonitor. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_model_monitor(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelMonitorRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_monitor(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.DeleteModelMonitorRequest, dict]]): + The request object. Request message for + [ModelMonitoringService.DeleteModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.DeleteModelMonitor]. 
+ name (:class:`str`): + Required. The name of the ModelMonitor resource to be + deleted. Format: + ``projects/{project}/locations/{location}/modelMonitords/{model_monitor}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_monitoring_service.DeleteModelMonitorRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_model_monitor, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def create_model_monitoring_job( + self, + request: Optional[ + Union[model_monitoring_service.CreateModelMonitoringJobRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + model_monitoring_job: Optional[ + gca_model_monitoring_job.ModelMonitoringJob + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model_monitoring_job.ModelMonitoringJob: + r"""Creates a ModelMonitoringJob. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_create_model_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateModelMonitoringJobRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_model_monitoring_job(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.CreateModelMonitoringJobRequest, dict]]): + The request object. Request message for + [ModelMonitoringService.CreateModelMonitoringJob][google.cloud.aiplatform.v1beta1.ModelMonitoringService.CreateModelMonitoringJob]. + parent (:class:`str`): + Required. The parent of the ModelMonitoringJob. Format: + ``projects/{project}/locations/{location}/modelMoniitors/{model_monitor}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_monitoring_job (:class:`google.cloud.aiplatform_v1beta1.types.ModelMonitoringJob`): + Required. The ModelMonitoringJob to + create + + This corresponds to the ``model_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelMonitoringJob: + Represents a model monitoring job + that analyze dataset using different + monitoring algorithm. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model_monitoring_job]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_monitoring_service.CreateModelMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model_monitoring_job is not None: + request.model_monitoring_job = model_monitoring_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_model_monitoring_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_model_monitoring_job( + self, + request: Optional[ + Union[model_monitoring_service.GetModelMonitoringJobRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_monitoring_job.ModelMonitoringJob: + r"""Gets a ModelMonitoringJob. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_get_model_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelMonitoringJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_monitoring_job(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.GetModelMonitoringJobRequest, dict]]): + The request object. Request message for + [ModelMonitoringService.GetModelMonitoringJob][google.cloud.aiplatform.v1beta1.ModelMonitoringService.GetModelMonitoringJob]. + name (:class:`str`): + Required. The resource name of the ModelMonitoringJob. + Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}/modelMonitoringJobs/{model_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelMonitoringJob: + Represents a model monitoring job + that analyze dataset using different + monitoring algorithm. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_monitoring_service.GetModelMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model_monitoring_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_model_monitoring_jobs( + self, + request: Optional[ + Union[model_monitoring_service.ListModelMonitoringJobsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelMonitoringJobsAsyncPager: + r"""Lists ModelMonitoringJobs. Callers may choose to read across + multiple Monitors as per + `AIP-159 `__ by using '-' (the + hyphen or dash character) as a wildcard character instead of + modelMonitor id in the parent. Format + ``projects/{project_id}/locations/{location}/moodelMonitors/-/modelMonitoringJobs`` + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_list_model_monitoring_jobs(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelMonitoringJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_monitoring_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.ListModelMonitoringJobsRequest, dict]]): + The request object. Request message for + [ModelMonitoringService.ListModelMonitoringJobs][google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitoringJobs]. + parent (:class:`str`): + Required. The parent of the ModelMonitoringJob. Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_monitoring_service.pagers.ListModelMonitoringJobsAsyncPager: + Response message for + [ModelMonitoringService.ListModelMonitoringJobs][google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitoringJobs]. + + Iterating over this object will yield results and + resolve additional pages automatically. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_monitoring_service.ListModelMonitoringJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_monitoring_jobs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelMonitoringJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_model_monitoring_job( + self, + request: Optional[ + Union[model_monitoring_service.DeleteModelMonitoringJobRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a ModelMonitoringJob. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_model_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelMonitoringJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.DeleteModelMonitoringJobRequest, dict]]): + The request object. Request message for + [ModelMonitoringService.DeleteModelMonitoringJob][google.cloud.aiplatform.v1beta1.ModelMonitoringService.DeleteModelMonitoringJob]. + name (:class:`str`): + Required. The resource name of the model monitoring job + to delete. Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}/modelMonitoringJobs/{model_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_monitoring_service.DeleteModelMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_model_monitoring_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def search_model_monitoring_stats( + self, + request: Optional[ + Union[model_monitoring_service.SearchModelMonitoringStatsRequest, dict] + ] = None, + *, + model_monitor: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchModelMonitoringStatsAsyncPager: + r"""Searches Model Monitoring Stats generated within a + given time window. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_search_model_monitoring_stats(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchModelMonitoringStatsRequest( + model_monitor="model_monitor_value", + ) + + # Make the request + page_result = client.search_model_monitoring_stats(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringStatsRequest, dict]]): + The request object. Request message for + [ModelMonitoringService.SearchModelMonitoringStats][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringStats]. + model_monitor (:class:`str`): + Required. ModelMonitor resource name. 
Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}`` + + This corresponds to the ``model_monitor`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_monitoring_service.pagers.SearchModelMonitoringStatsAsyncPager: + Response message for + [ModelMonitoringService.SearchModelMonitoringStats][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringStats]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_monitor]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_monitoring_service.SearchModelMonitoringStatsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model_monitor is not None: + request.model_monitor = model_monitor + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.search_model_monitoring_stats, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("model_monitor", request.model_monitor),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.SearchModelMonitoringStatsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def search_model_monitoring_alerts( + self, + request: Optional[ + Union[model_monitoring_service.SearchModelMonitoringAlertsRequest, dict] + ] = None, + *, + model_monitor: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchModelMonitoringAlertsAsyncPager: + r"""Returns the Model Monitoring alerts. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_search_model_monitoring_alerts(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchModelMonitoringAlertsRequest( + model_monitor="model_monitor_value", + ) + + # Make the request + page_result = client.search_model_monitoring_alerts(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringAlertsRequest, dict]]): + The request object. Request message for + [ModelMonitoringService.SearchModelMonitoringAlerts][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringAlerts]. + model_monitor (:class:`str`): + Required. ModelMonitor resource name. Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}`` + + This corresponds to the ``model_monitor`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_monitoring_service.pagers.SearchModelMonitoringAlertsAsyncPager: + Response message for + [ModelMonitoringService.SearchModelMonitoringAlerts][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringAlerts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_monitor]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = model_monitoring_service.SearchModelMonitoringAlertsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model_monitor is not None: + request.model_monitor = model_monitor + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.search_model_monitoring_alerts, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("model_monitor", request.model_monitor),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.SearchModelMonitoringAlertsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. 
+ + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. 
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. 
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. 
Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def __aenter__(self) -> "ModelMonitoringServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("ModelMonitoringServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/client.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/client.py new file mode 100644 index 0000000000..509fca20d8 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/client.py @@ -0,0 +1,2949 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) +import warnings + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.model_monitoring_service import pagers +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import job_state +from google.cloud.aiplatform_v1beta1.types import model_monitor +from google.cloud.aiplatform_v1beta1.types import model_monitor as gca_model_monitor +from google.cloud.aiplatform_v1beta1.types import model_monitoring_alert +from google.cloud.aiplatform_v1beta1.types import model_monitoring_job +from google.cloud.aiplatform_v1beta1.types import ( + model_monitoring_job as gca_model_monitoring_job, +) +from google.cloud.aiplatform_v1beta1.types import model_monitoring_service +from google.cloud.aiplatform_v1beta1.types import model_monitoring_spec +from google.cloud.aiplatform_v1beta1.types import 
model_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import ModelMonitoringServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import ModelMonitoringServiceGrpcTransport +from .transports.grpc_asyncio import ModelMonitoringServiceGrpcAsyncIOTransport +from .transports.rest import ModelMonitoringServiceRestTransport + + +class ModelMonitoringServiceClientMeta(type): + """Metaclass for the ModelMonitoringService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[ModelMonitoringServiceTransport]] + _transport_registry["grpc"] = ModelMonitoringServiceGrpcTransport + _transport_registry["grpc_asyncio"] = ModelMonitoringServiceGrpcAsyncIOTransport + _transport_registry["rest"] = ModelMonitoringServiceRestTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[ModelMonitoringServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
+ return next(iter(cls._transport_registry.values())) + + +class ModelMonitoringServiceClient(metaclass=ModelMonitoringServiceClientMeta): + """A service for creating and managing Vertex AI Model moitoring. This + includes ``ModelMonitor`` resources, ``ModelMonitoringJob`` + resources. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelMonitoringServiceClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelMonitoringServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ModelMonitoringServiceTransport: + """Returns the transport used by the client instance. + + Returns: + ModelMonitoringServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def batch_prediction_job_path( + project: str, + location: str, + batch_prediction_job: str, + ) -> str: + """Returns a fully-qualified batch_prediction_job string.""" + return "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format( + project=project, + location=location, + batch_prediction_job=batch_prediction_job, + ) + + @staticmethod + def parse_batch_prediction_job_path(path: str) -> Dict[str, str]: + """Parses a batch_prediction_job path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/batchPredictionJobs/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def dataset_path( + project: str, + location: str, + dataset: str, + ) -> str: + """Returns a fully-qualified dataset string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, + location=location, + dataset=dataset, + ) + + @staticmethod + def parse_dataset_path(path: str) -> Dict[str, str]: + """Parses a dataset path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def endpoint_path( + project: str, + location: str, + endpoint: str, + ) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format( + project=project, + location=location, + endpoint=endpoint, + ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str, str]: + """Parses a endpoint path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def model_path( + project: str, + location: str, + model: str, + ) -> str: + """Returns a fully-qualified model string.""" + return 
"projects/{project}/locations/{location}/models/{model}".format( + project=project, + location=location, + model=model, + ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str, str]: + """Parses a model path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def model_monitor_path( + project: str, + location: str, + model_monitor: str, + ) -> str: + """Returns a fully-qualified model_monitor string.""" + return "projects/{project}/locations/{location}/modelMonitors/{model_monitor}".format( + project=project, + location=location, + model_monitor=model_monitor, + ) + + @staticmethod + def parse_model_monitor_path(path: str) -> Dict[str, str]: + """Parses a model_monitor path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/modelMonitors/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def model_monitoring_job_path( + project: str, + location: str, + model_monitor: str, + model_monitoring_job: str, + ) -> str: + """Returns a fully-qualified model_monitoring_job string.""" + return "projects/{project}/locations/{location}/modelMonitors/{model_monitor}/modelMonitoringJobs/{model_monitoring_job}".format( + project=project, + location=location, + model_monitor=model_monitor, + model_monitoring_job=model_monitoring_job, + ) + + @staticmethod + def parse_model_monitoring_job_path(path: str) -> Dict[str, str]: + """Parses a model_monitoring_job path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/modelMonitors/(?P.+?)/modelMonitoringJobs/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def schedule_path( + project: str, + location: str, + schedule: str, + ) -> str: + """Returns a fully-qualified schedule string.""" + return "projects/{project}/locations/{location}/schedules/{schedule}".format( + 
project=project, + location=location, + schedule=schedule, + ) + + @staticmethod + def parse_schedule_path(path: str) -> Dict[str, str]: + """Parses a schedule path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/schedules/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path( + billing_account: str, + ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path( + folder: str, + ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format( + folder=folder, + ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path( + organization: str, + ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format( + organization=organization, + ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path( + project: str, + ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format( + project=project, + ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = 
re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path( + project: str, + location: str, + ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[client_options_lib.ClientOptions] = None + ): + """Deprecated. Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. 
+ + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.", + DeprecationWarning, + ) + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. 
+ """ + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". 
+ + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = ModelMonitoringServiceClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = ModelMonitoringServiceClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = ( + ModelMonitoringServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = ModelMonitoringServiceClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + @staticmethod + def _compare_universes( + client_universe: str, credentials: ga_credentials.Credentials + ) -> bool: + """Returns True iff the universe domains used by the client and credentials match. + + Args: + client_universe (str): The universe domain configured via the client options. + credentials (ga_credentials.Credentials): The credentials being used in the client. 
+ + Returns: + bool: True iff client_universe matches the universe in credentials. + + Raises: + ValueError: when client_universe does not match the universe in credentials. + """ + + default_universe = ModelMonitoringServiceClient._DEFAULT_UNIVERSE + credentials_universe = getattr(credentials, "universe_domain", default_universe) + + if client_universe != credentials_universe: + raise ValueError( + "The configured universe domain " + f"({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{default_universe}` is the default." + ) + return True + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + self._is_universe_domain_valid = ( + self._is_universe_domain_valid + or ModelMonitoringServiceClient._compare_universes( + self.universe_domain, self.transport._credentials + ) + ) + return self._is_universe_domain_valid + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, ModelMonitoringServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the model monitoring service client. 
+ + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ModelMonitoringServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. 
+ + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) + + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = ModelMonitoringServiceClient._read_environment_variables() + self._client_cert_source = ModelMonitoringServiceClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + self._universe_domain = ModelMonitoringServiceClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env + ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False + + api_key_value = getattr(self._client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + transport_provided = isinstance(transport, ModelMonitoringServiceTransport) + if transport_provided: + # transport is a ModelMonitoringServiceTransport instance. 
+ if credentials or self._client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if self._client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = cast(ModelMonitoringServiceTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or ModelMonitoringServiceClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(cast(str, transport)) + self._transport = Transport( + credentials=credentials, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=self._client_options.api_audience, + ) + + def create_model_monitor( + self, + request: Optional[ + Union[model_monitoring_service.CreateModelMonitorRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + model_monitor: Optional[gca_model_monitor.ModelMonitor] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a ModelMonitor. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_create_model_monitor(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateModelMonitorRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_model_monitor(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateModelMonitorRequest, dict]): + The request object. Request message for + [ModelMonitoringService.CreateModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.CreateModelMonitor]. + parent (str): + Required. The resource name of the Location to create + the ModelMonitor in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_monitor (google.cloud.aiplatform_v1beta1.types.ModelMonitor): + Required. The ModelMonitor to create. + This corresponds to the ``model_monitor`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ModelMonitor` Vertex AI Model Monitoring Service serves as a central hub for the analysis + and visualization of data quality and performance + related to models. ModelMonitor stands as a top level + resource for overseeing your model monitoring tasks. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model_monitor]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_monitoring_service.CreateModelMonitorRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_monitoring_service.CreateModelMonitorRequest): + request = model_monitoring_service.CreateModelMonitorRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model_monitor is not None: + request.model_monitor = model_monitor + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_model_monitor] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_model_monitor.ModelMonitor, + metadata_type=model_monitoring_service.CreateModelMonitorOperationMetadata, + ) + + # Done; return the response. + return response + + def update_model_monitor( + self, + request: Optional[ + Union[model_monitoring_service.UpdateModelMonitorRequest, dict] + ] = None, + *, + model_monitor: Optional[gca_model_monitor.ModelMonitor] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates a ModelMonitor. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_update_model_monitor(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateModelMonitorRequest( + ) + + # Make the request + operation = client.update_model_monitor(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateModelMonitorRequest, dict]): + The request object. Request message for + [ModelMonitoringService.UpdateModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.UpdateModelMonitor]. + model_monitor (google.cloud.aiplatform_v1beta1.types.ModelMonitor): + Required. 
The model monitoring + configuration which replaces the + resource on the server. + + This corresponds to the ``model_monitor`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Mask specifying which + fields to update. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ModelMonitor` Vertex AI Model Monitoring Service serves as a central hub for the analysis + and visualization of data quality and performance + related to models. ModelMonitor stands as a top level + resource for overseeing your model monitoring tasks. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_monitor, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_monitoring_service.UpdateModelMonitorRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, model_monitoring_service.UpdateModelMonitorRequest): + request = model_monitoring_service.UpdateModelMonitorRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model_monitor is not None: + request.model_monitor = model_monitor + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_model_monitor] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("model_monitor.name", request.model_monitor.name),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_model_monitor.ModelMonitor, + metadata_type=model_monitoring_service.UpdateModelMonitorOperationMetadata, + ) + + # Done; return the response. + return response + + def get_model_monitor( + self, + request: Optional[ + Union[model_monitoring_service.GetModelMonitorRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_monitor.ModelMonitor: + r"""Gets a ModelMonitor. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_get_model_monitor(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelMonitorRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_monitor(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetModelMonitorRequest, dict]): + The request object. Request message for + [ModelMonitoringService.GetModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.GetModelMonitor]. + name (str): + Required. The name of the ModelMonitor resource. Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelMonitor: + Vertex AI Model Monitoring Service + serves as a central hub for the analysis + and visualization of data quality and + performance related to models. + ModelMonitor stands as a top level + resource for overseeing your model + monitoring tasks. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_monitoring_service.GetModelMonitorRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_monitoring_service.GetModelMonitorRequest): + request = model_monitoring_service.GetModelMonitorRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model_monitor] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_model_monitors( + self, + request: Optional[ + Union[model_monitoring_service.ListModelMonitorsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelMonitorsPager: + r"""Lists ModelMonitors in a Location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_list_model_monitors(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelMonitorsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_monitors(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListModelMonitorsRequest, dict]): + The request object. Request message for + [ModelMonitoringService.ListModelMonitors][google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitors]. + parent (str): + Required. The resource name of the Location to list the + ModelMonitors from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_monitoring_service.pagers.ListModelMonitorsPager: + Response message for + [ModelMonitoringService.ListModelMonitors][google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitors] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_monitoring_service.ListModelMonitorsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_monitoring_service.ListModelMonitorsRequest): + request = model_monitoring_service.ListModelMonitorsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_model_monitors] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelMonitorsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_model_monitor( + self, + request: Optional[ + Union[model_monitoring_service.DeleteModelMonitorRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a ModelMonitor. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_delete_model_monitor(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelMonitorRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_monitor(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelMonitorRequest, dict]): + The request object. Request message for + [ModelMonitoringService.DeleteModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.DeleteModelMonitor]. + name (str): + Required. The name of the ModelMonitor resource to be + deleted. Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_monitoring_service.DeleteModelMonitorRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_monitoring_service.DeleteModelMonitorRequest): + request = model_monitoring_service.DeleteModelMonitorRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_model_monitor] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def create_model_monitoring_job( + self, + request: Optional[ + Union[model_monitoring_service.CreateModelMonitoringJobRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + model_monitoring_job: Optional[ + gca_model_monitoring_job.ModelMonitoringJob + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model_monitoring_job.ModelMonitoringJob: + r"""Creates a ModelMonitoringJob. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_create_model_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateModelMonitoringJobRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_model_monitoring_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateModelMonitoringJobRequest, dict]): + The request object. Request message for + [ModelMonitoringService.CreateModelMonitoringJob][google.cloud.aiplatform.v1beta1.ModelMonitoringService.CreateModelMonitoringJob]. + parent (str): + Required. The parent of the ModelMonitoringJob. 
Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelMonitoringJob): + Required. The ModelMonitoringJob to + create + + This corresponds to the ``model_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelMonitoringJob: + Represents a model monitoring job + that analyze dataset using different + monitoring algorithm. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model_monitoring_job]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_monitoring_service.CreateModelMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, model_monitoring_service.CreateModelMonitoringJobRequest + ): + request = model_monitoring_service.CreateModelMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if model_monitoring_job is not None: + request.model_monitoring_job = model_monitoring_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.create_model_monitoring_job + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_model_monitoring_job( + self, + request: Optional[ + Union[model_monitoring_service.GetModelMonitoringJobRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_monitoring_job.ModelMonitoringJob: + r"""Gets a ModelMonitoringJob. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_get_model_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelMonitoringJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_monitoring_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetModelMonitoringJobRequest, dict]): + The request object. Request message for + [ModelMonitoringService.GetModelMonitoringJob][google.cloud.aiplatform.v1beta1.ModelMonitoringService.GetModelMonitoringJob]. + name (str): + Required. The resource name of the ModelMonitoringJob. + Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}/modelMonitoringJobs/{model_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelMonitoringJob: + Represents a model monitoring job + that analyze dataset using different + monitoring algorithm. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_monitoring_service.GetModelMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, model_monitoring_service.GetModelMonitoringJobRequest + ): + request = model_monitoring_service.GetModelMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model_monitoring_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_model_monitoring_jobs( + self, + request: Optional[ + Union[model_monitoring_service.ListModelMonitoringJobsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelMonitoringJobsPager: + r"""Lists ModelMonitoringJobs. Callers may choose to read across + multiple Monitors as per + `AIP-159 `__ by using '-' (the + hyphen or dash character) as a wildcard character instead of + modelMonitor id in the parent. 
Format +        ``projects/{project_id}/locations/{location}/modelMonitors/-/modelMonitoringJobs``
+ + Returns: + google.cloud.aiplatform_v1beta1.services.model_monitoring_service.pagers.ListModelMonitoringJobsPager: + Response message for + [ModelMonitoringService.ListModelMonitoringJobs][google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitoringJobs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_monitoring_service.ListModelMonitoringJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, model_monitoring_service.ListModelMonitoringJobsRequest + ): + request = model_monitoring_service.ListModelMonitoringJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_model_monitoring_jobs + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListModelMonitoringJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_model_monitoring_job( + self, + request: Optional[ + Union[model_monitoring_service.DeleteModelMonitoringJobRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a ModelMonitoringJob. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_delete_model_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelMonitoringJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelMonitoringJobRequest, dict]): + The request object. Request message for + [ModelMonitoringService.DeleteModelMonitoringJob][google.cloud.aiplatform.v1beta1.ModelMonitoringService.DeleteModelMonitoringJob]. + name (str): + Required. The resource name of the model monitoring job + to delete. 
Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}/modelMonitoringJobs/{model_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_monitoring_service.DeleteModelMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, model_monitoring_service.DeleteModelMonitoringJobRequest + ): + request = model_monitoring_service.DeleteModelMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.delete_model_monitoring_job + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def search_model_monitoring_stats( + self, + request: Optional[ + Union[model_monitoring_service.SearchModelMonitoringStatsRequest, dict] + ] = None, + *, + model_monitor: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchModelMonitoringStatsPager: + r"""Searches Model Monitoring Stats generated within a + given time window. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_search_model_monitoring_stats(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchModelMonitoringStatsRequest( + model_monitor="model_monitor_value", + ) + + # Make the request + page_result = client.search_model_monitoring_stats(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringStatsRequest, dict]): + The request object. Request message for + [ModelMonitoringService.SearchModelMonitoringStats][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringStats]. + model_monitor (str): + Required. ModelMonitor resource name. Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}`` + + This corresponds to the ``model_monitor`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_monitoring_service.pagers.SearchModelMonitoringStatsPager: + Response message for + [ModelMonitoringService.SearchModelMonitoringStats][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringStats]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_monitor]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_monitoring_service.SearchModelMonitoringStatsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, model_monitoring_service.SearchModelMonitoringStatsRequest + ): + request = model_monitoring_service.SearchModelMonitoringStatsRequest( + request + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model_monitor is not None: + request.model_monitor = model_monitor + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.search_model_monitoring_stats + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("model_monitor", request.model_monitor),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.SearchModelMonitoringStatsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def search_model_monitoring_alerts( + self, + request: Optional[ + Union[model_monitoring_service.SearchModelMonitoringAlertsRequest, dict] + ] = None, + *, + model_monitor: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchModelMonitoringAlertsPager: + r"""Returns the Model Monitoring alerts. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_search_model_monitoring_alerts(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchModelMonitoringAlertsRequest( + model_monitor="model_monitor_value", + ) + + # Make the request + page_result = client.search_model_monitoring_alerts(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringAlertsRequest, dict]): + The request object. Request message for + [ModelMonitoringService.SearchModelMonitoringAlerts][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringAlerts]. + model_monitor (str): + Required. ModelMonitor resource name. Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}`` + + This corresponds to the ``model_monitor`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_monitoring_service.pagers.SearchModelMonitoringAlertsPager: + Response message for + [ModelMonitoringService.SearchModelMonitoringAlerts][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringAlerts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_monitor]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a model_monitoring_service.SearchModelMonitoringAlertsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, model_monitoring_service.SearchModelMonitoringAlertsRequest + ): + request = model_monitoring_service.SearchModelMonitoringAlertsRequest( + request + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model_monitor is not None: + request.model_monitor = model_monitor + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.search_model_monitoring_alerts + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("model_monitor", request.model_monitor),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.SearchModelMonitoringAlertsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "ModelMonitoringServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). 
A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("ModelMonitoringServiceClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/pagers.py new file mode 100644 index 0000000000..35379d323c --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/pagers.py @@ -0,0 +1,577 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Sequence, + Tuple, + Optional, + Iterator, +) + +from google.cloud.aiplatform_v1beta1.types import model_monitor +from google.cloud.aiplatform_v1beta1.types import model_monitoring_alert +from google.cloud.aiplatform_v1beta1.types import model_monitoring_job +from google.cloud.aiplatform_v1beta1.types import model_monitoring_service +from google.cloud.aiplatform_v1beta1.types import model_monitoring_stats + + +class ListModelMonitorsPager: + """A pager for iterating through ``list_model_monitors`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelMonitorsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_monitors`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListModelMonitors`` requests and continue to iterate + through the ``model_monitors`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelMonitorsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., model_monitoring_service.ListModelMonitorsResponse], + request: model_monitoring_service.ListModelMonitorsRequest, + response: model_monitoring_service.ListModelMonitorsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelMonitorsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelMonitorsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = model_monitoring_service.ListModelMonitorsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[model_monitoring_service.ListModelMonitorsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model_monitor.ModelMonitor]: + for page in self.pages: + yield from page.model_monitors + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListModelMonitorsAsyncPager: + """A pager for iterating through ``list_model_monitors`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelMonitorsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_monitors`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelMonitors`` requests and continue to iterate + through the ``model_monitors`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelMonitorsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[model_monitoring_service.ListModelMonitorsResponse] + ], + request: model_monitoring_service.ListModelMonitorsRequest, + response: model_monitoring_service.ListModelMonitorsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1beta1.types.ListModelMonitorsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelMonitorsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_monitoring_service.ListModelMonitorsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[model_monitoring_service.ListModelMonitorsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[model_monitor.ModelMonitor]: + async def async_generator(): + async for page in self.pages: + for response in page.model_monitors: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListModelMonitoringJobsPager: + """A pager for iterating through ``list_model_monitoring_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelMonitoringJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_monitoring_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelMonitoringJobs`` requests and continue to iterate + through the ``model_monitoring_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelMonitoringJobsResponse` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., model_monitoring_service.ListModelMonitoringJobsResponse], + request: model_monitoring_service.ListModelMonitoringJobsRequest, + response: model_monitoring_service.ListModelMonitoringJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelMonitoringJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelMonitoringJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_monitoring_service.ListModelMonitoringJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages( + self, + ) -> Iterator[model_monitoring_service.ListModelMonitoringJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model_monitoring_job.ModelMonitoringJob]: + for page in self.pages: + yield from page.model_monitoring_jobs + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListModelMonitoringJobsAsyncPager: + """A pager for iterating through ``list_model_monitoring_jobs`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelMonitoringJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_monitoring_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelMonitoringJobs`` requests and continue to iterate + through the ``model_monitoring_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelMonitoringJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[model_monitoring_service.ListModelMonitoringJobsResponse] + ], + request: model_monitoring_service.ListModelMonitoringJobsRequest, + response: model_monitoring_service.ListModelMonitoringJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelMonitoringJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelMonitoringJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = model_monitoring_service.ListModelMonitoringJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[model_monitoring_service.ListModelMonitoringJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[model_monitoring_job.ModelMonitoringJob]: + async def async_generator(): + async for page in self.pages: + for response in page.model_monitoring_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class SearchModelMonitoringStatsPager: + """A pager for iterating through ``search_model_monitoring_stats`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringStatsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``monitoring_stats`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``SearchModelMonitoringStats`` requests and continue to iterate + through the ``monitoring_stats`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringStatsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[ + ..., model_monitoring_service.SearchModelMonitoringStatsResponse + ], + request: model_monitoring_service.SearchModelMonitoringStatsRequest, + response: model_monitoring_service.SearchModelMonitoringStatsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringStatsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringStatsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_monitoring_service.SearchModelMonitoringStatsRequest( + request + ) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages( + self, + ) -> Iterator[model_monitoring_service.SearchModelMonitoringStatsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model_monitoring_stats.ModelMonitoringStats]: + for page in self.pages: + yield from page.monitoring_stats + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class SearchModelMonitoringStatsAsyncPager: + """A pager for iterating through ``search_model_monitoring_stats`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringStatsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``monitoring_stats`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``SearchModelMonitoringStats`` requests and continue to iterate + through the ``monitoring_stats`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringStatsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[model_monitoring_service.SearchModelMonitoringStatsResponse] + ], + request: model_monitoring_service.SearchModelMonitoringStatsRequest, + response: model_monitoring_service.SearchModelMonitoringStatsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringStatsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringStatsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = model_monitoring_service.SearchModelMonitoringStatsRequest( + request + ) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[model_monitoring_service.SearchModelMonitoringStatsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[model_monitoring_stats.ModelMonitoringStats]: + async def async_generator(): + async for page in self.pages: + for response in page.monitoring_stats: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class SearchModelMonitoringAlertsPager: + """A pager for iterating through ``search_model_monitoring_alerts`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringAlertsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_monitoring_alerts`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``SearchModelMonitoringAlerts`` requests and continue to iterate + through the ``model_monitoring_alerts`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringAlertsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[ + ..., model_monitoring_service.SearchModelMonitoringAlertsResponse + ], + request: model_monitoring_service.SearchModelMonitoringAlertsRequest, + response: model_monitoring_service.SearchModelMonitoringAlertsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringAlertsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringAlertsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_monitoring_service.SearchModelMonitoringAlertsRequest( + request + ) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages( + self, + ) -> Iterator[model_monitoring_service.SearchModelMonitoringAlertsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[model_monitoring_alert.ModelMonitoringAlert]: + for page in self.pages: + yield from page.model_monitoring_alerts + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class SearchModelMonitoringAlertsAsyncPager: + """A pager for iterating through ``search_model_monitoring_alerts`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringAlertsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_monitoring_alerts`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``SearchModelMonitoringAlerts`` requests and continue to iterate + through the ``model_monitoring_alerts`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringAlertsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[model_monitoring_service.SearchModelMonitoringAlertsResponse] + ], + request: model_monitoring_service.SearchModelMonitoringAlertsRequest, + response: model_monitoring_service.SearchModelMonitoringAlertsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringAlertsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringAlertsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = model_monitoring_service.SearchModelMonitoringAlertsRequest( + request + ) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[model_monitoring_service.SearchModelMonitoringAlertsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[model_monitoring_alert.ModelMonitoringAlert]: + async def async_generator(): + async for page in self.pages: + for response in page.model_monitoring_alerts: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/__init__.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/__init__.py new file mode 100644 index 0000000000..9ef3a7e354 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/__init__.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ModelMonitoringServiceTransport +from .grpc import ModelMonitoringServiceGrpcTransport +from .grpc_asyncio import ModelMonitoringServiceGrpcAsyncIOTransport +from .rest import ModelMonitoringServiceRestTransport +from .rest import ModelMonitoringServiceRestInterceptor + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[ModelMonitoringServiceTransport]] +_transport_registry["grpc"] = ModelMonitoringServiceGrpcTransport +_transport_registry["grpc_asyncio"] = ModelMonitoringServiceGrpcAsyncIOTransport +_transport_registry["rest"] = ModelMonitoringServiceRestTransport + +__all__ = ( + "ModelMonitoringServiceTransport", + "ModelMonitoringServiceGrpcTransport", + "ModelMonitoringServiceGrpcAsyncIOTransport", + "ModelMonitoringServiceRestTransport", + "ModelMonitoringServiceRestInterceptor", +) diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/base.py new file mode 100644 index 0000000000..eee3826b14 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/base.py @@ -0,0 +1,426 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import model_monitor +from google.cloud.aiplatform_v1beta1.types import model_monitoring_job +from google.cloud.aiplatform_v1beta1.types import ( + model_monitoring_job as gca_model_monitoring_job, +) +from google.cloud.aiplatform_v1beta1.types import model_monitoring_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class ModelMonitoringServiceTransport(abc.ABC): + """Abstract transport class for ModelMonitoringService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "aiplatform.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. 
+ + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. 
+ if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + @property + def host(self): + return self._host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_model_monitor: gapic_v1.method.wrap_method( + self.create_model_monitor, + default_timeout=None, + client_info=client_info, + ), + self.update_model_monitor: gapic_v1.method.wrap_method( + self.update_model_monitor, + default_timeout=None, + client_info=client_info, + ), + self.get_model_monitor: gapic_v1.method.wrap_method( + self.get_model_monitor, + default_timeout=None, + client_info=client_info, + ), + self.list_model_monitors: gapic_v1.method.wrap_method( + self.list_model_monitors, + default_timeout=None, + client_info=client_info, + ), + self.delete_model_monitor: gapic_v1.method.wrap_method( + self.delete_model_monitor, + default_timeout=None, + client_info=client_info, + ), + self.create_model_monitoring_job: gapic_v1.method.wrap_method( + self.create_model_monitoring_job, + default_timeout=None, + client_info=client_info, + ), + self.get_model_monitoring_job: gapic_v1.method.wrap_method( + self.get_model_monitoring_job, + default_timeout=None, + client_info=client_info, + ), + self.list_model_monitoring_jobs: gapic_v1.method.wrap_method( + self.list_model_monitoring_jobs, + default_timeout=None, + client_info=client_info, + 
), + self.delete_model_monitoring_job: gapic_v1.method.wrap_method( + self.delete_model_monitoring_job, + default_timeout=None, + client_info=client_info, + ), + self.search_model_monitoring_stats: gapic_v1.method.wrap_method( + self.search_model_monitoring_stats, + default_timeout=None, + client_info=client_info, + ), + self.search_model_monitoring_alerts: gapic_v1.method.wrap_method( + self.search_model_monitoring_alerts, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_model_monitor( + self, + ) -> Callable[ + [model_monitoring_service.CreateModelMonitorRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def update_model_monitor( + self, + ) -> Callable[ + [model_monitoring_service.UpdateModelMonitorRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_model_monitor( + self, + ) -> Callable[ + [model_monitoring_service.GetModelMonitorRequest], + Union[model_monitor.ModelMonitor, Awaitable[model_monitor.ModelMonitor]], + ]: + raise NotImplementedError() + + @property + def list_model_monitors( + self, + ) -> Callable[ + [model_monitoring_service.ListModelMonitorsRequest], + Union[ + model_monitoring_service.ListModelMonitorsResponse, + Awaitable[model_monitoring_service.ListModelMonitorsResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_model_monitor( + self, + ) -> Callable[ + [model_monitoring_service.DeleteModelMonitorRequest], + 
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def create_model_monitoring_job( + self, + ) -> Callable[ + [model_monitoring_service.CreateModelMonitoringJobRequest], + Union[ + gca_model_monitoring_job.ModelMonitoringJob, + Awaitable[gca_model_monitoring_job.ModelMonitoringJob], + ], + ]: + raise NotImplementedError() + + @property + def get_model_monitoring_job( + self, + ) -> Callable[ + [model_monitoring_service.GetModelMonitoringJobRequest], + Union[ + model_monitoring_job.ModelMonitoringJob, + Awaitable[model_monitoring_job.ModelMonitoringJob], + ], + ]: + raise NotImplementedError() + + @property + def list_model_monitoring_jobs( + self, + ) -> Callable[ + [model_monitoring_service.ListModelMonitoringJobsRequest], + Union[ + model_monitoring_service.ListModelMonitoringJobsResponse, + Awaitable[model_monitoring_service.ListModelMonitoringJobsResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_model_monitoring_job( + self, + ) -> Callable[ + [model_monitoring_service.DeleteModelMonitoringJobRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def search_model_monitoring_stats( + self, + ) -> Callable[ + [model_monitoring_service.SearchModelMonitoringStatsRequest], + Union[ + model_monitoring_service.SearchModelMonitoringStatsResponse, + Awaitable[model_monitoring_service.SearchModelMonitoringStatsResponse], + ], + ]: + raise NotImplementedError() + + @property + def search_model_monitoring_alerts( + self, + ) -> Callable[ + [model_monitoring_service.SearchModelMonitoringAlertsRequest], + Union[ + model_monitoring_service.SearchModelMonitoringAlertsResponse, + Awaitable[model_monitoring_service.SearchModelMonitoringAlertsResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[ + 
operations_pb2.ListOperationsResponse, + Awaitable[operations_pb2.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location( + self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[ + locations_pb2.ListLocationsResponse, + Awaitable[locations_pb2.ListLocationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + 
+__all__ = ("ModelMonitoringServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/grpc.py new file mode 100644 index 0000000000..6340db5548 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/grpc.py @@ -0,0 +1,795 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import model_monitor +from google.cloud.aiplatform_v1beta1.types import model_monitoring_job +from google.cloud.aiplatform_v1beta1.types import ( + model_monitoring_job as gca_model_monitoring_job, +) +from google.cloud.aiplatform_v1beta1.types import model_monitoring_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import ModelMonitoringServiceTransport, DEFAULT_CLIENT_INFO + + +class ModelMonitoringServiceGrpcTransport(ModelMonitoringServiceTransport): + """gRPC backend transport for ModelMonitoringService. + + A service for creating and managing Vertex AI Model moitoring. This + includes ``ModelMonitor`` resources, ``ModelMonitoringJob`` + resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. 
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. 
+ self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def create_model_monitor( + self, + ) -> Callable[ + [model_monitoring_service.CreateModelMonitorRequest], operations_pb2.Operation + ]: + r"""Return a callable for the create model monitor method over gRPC. + + Creates a ModelMonitor. + + Returns: + Callable[[~.CreateModelMonitorRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_model_monitor" not in self._stubs: + self._stubs["create_model_monitor"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/CreateModelMonitor", + request_serializer=model_monitoring_service.CreateModelMonitorRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_model_monitor"] + + @property + def update_model_monitor( + self, + ) -> Callable[ + [model_monitoring_service.UpdateModelMonitorRequest], operations_pb2.Operation + ]: + r"""Return a callable for the update model monitor method over gRPC. + + Updates a ModelMonitor. + + Returns: + Callable[[~.UpdateModelMonitorRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_model_monitor" not in self._stubs: + self._stubs["update_model_monitor"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/UpdateModelMonitor", + request_serializer=model_monitoring_service.UpdateModelMonitorRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_model_monitor"] + + @property + def get_model_monitor( + self, + ) -> Callable[ + [model_monitoring_service.GetModelMonitorRequest], model_monitor.ModelMonitor + ]: + r"""Return a callable for the get model monitor method over gRPC. + + Gets a ModelMonitor. + + Returns: + Callable[[~.GetModelMonitorRequest], + ~.ModelMonitor]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_model_monitor" not in self._stubs: + self._stubs["get_model_monitor"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/GetModelMonitor", + request_serializer=model_monitoring_service.GetModelMonitorRequest.serialize, + response_deserializer=model_monitor.ModelMonitor.deserialize, + ) + return self._stubs["get_model_monitor"] + + @property + def list_model_monitors( + self, + ) -> Callable[ + [model_monitoring_service.ListModelMonitorsRequest], + model_monitoring_service.ListModelMonitorsResponse, + ]: + r"""Return a callable for the list model monitors method over gRPC. + + Lists ModelMonitors in a Location. + + Returns: + Callable[[~.ListModelMonitorsRequest], + ~.ListModelMonitorsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_model_monitors" not in self._stubs: + self._stubs["list_model_monitors"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/ListModelMonitors", + request_serializer=model_monitoring_service.ListModelMonitorsRequest.serialize, + response_deserializer=model_monitoring_service.ListModelMonitorsResponse.deserialize, + ) + return self._stubs["list_model_monitors"] + + @property + def delete_model_monitor( + self, + ) -> Callable[ + [model_monitoring_service.DeleteModelMonitorRequest], operations_pb2.Operation + ]: + r"""Return a callable for the delete model monitor method over gRPC. + + Deletes a ModelMonitor. + + Returns: + Callable[[~.DeleteModelMonitorRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_model_monitor" not in self._stubs: + self._stubs["delete_model_monitor"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/DeleteModelMonitor", + request_serializer=model_monitoring_service.DeleteModelMonitorRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_model_monitor"] + + @property + def create_model_monitoring_job( + self, + ) -> Callable[ + [model_monitoring_service.CreateModelMonitoringJobRequest], + gca_model_monitoring_job.ModelMonitoringJob, + ]: + r"""Return a callable for the create model monitoring job method over gRPC. + + Creates a ModelMonitoringJob. + + Returns: + Callable[[~.CreateModelMonitoringJobRequest], + ~.ModelMonitoringJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_model_monitoring_job" not in self._stubs: + self._stubs["create_model_monitoring_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/CreateModelMonitoringJob", + request_serializer=model_monitoring_service.CreateModelMonitoringJobRequest.serialize, + response_deserializer=gca_model_monitoring_job.ModelMonitoringJob.deserialize, + ) + return self._stubs["create_model_monitoring_job"] + + @property + def get_model_monitoring_job( + self, + ) -> Callable[ + [model_monitoring_service.GetModelMonitoringJobRequest], + model_monitoring_job.ModelMonitoringJob, + ]: + r"""Return a callable for the get model monitoring job method over gRPC. + + Gets a ModelMonitoringJob. 
+ + Returns: + Callable[[~.GetModelMonitoringJobRequest], + ~.ModelMonitoringJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_model_monitoring_job" not in self._stubs: + self._stubs["get_model_monitoring_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/GetModelMonitoringJob", + request_serializer=model_monitoring_service.GetModelMonitoringJobRequest.serialize, + response_deserializer=model_monitoring_job.ModelMonitoringJob.deserialize, + ) + return self._stubs["get_model_monitoring_job"] + + @property + def list_model_monitoring_jobs( + self, + ) -> Callable[ + [model_monitoring_service.ListModelMonitoringJobsRequest], + model_monitoring_service.ListModelMonitoringJobsResponse, + ]: + r"""Return a callable for the list model monitoring jobs method over gRPC. + + Lists ModelMonitoringJobs. Callers may choose to read across + multiple Monitors as per + `AIP-159 `__ by using '-' (the + hyphen or dash character) as a wildcard character instead of + modelMonitor id in the parent. Format + ``projects/{project_id}/locations/{location}/moodelMonitors/-/modelMonitoringJobs`` + + Returns: + Callable[[~.ListModelMonitoringJobsRequest], + ~.ListModelMonitoringJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_model_monitoring_jobs" not in self._stubs: + self._stubs["list_model_monitoring_jobs"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/ListModelMonitoringJobs", + request_serializer=model_monitoring_service.ListModelMonitoringJobsRequest.serialize, + response_deserializer=model_monitoring_service.ListModelMonitoringJobsResponse.deserialize, + ) + return self._stubs["list_model_monitoring_jobs"] + + @property + def delete_model_monitoring_job( + self, + ) -> Callable[ + [model_monitoring_service.DeleteModelMonitoringJobRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the delete model monitoring job method over gRPC. + + Deletes a ModelMonitoringJob. + + Returns: + Callable[[~.DeleteModelMonitoringJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_model_monitoring_job" not in self._stubs: + self._stubs["delete_model_monitoring_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/DeleteModelMonitoringJob", + request_serializer=model_monitoring_service.DeleteModelMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_model_monitoring_job"] + + @property + def search_model_monitoring_stats( + self, + ) -> Callable[ + [model_monitoring_service.SearchModelMonitoringStatsRequest], + model_monitoring_service.SearchModelMonitoringStatsResponse, + ]: + r"""Return a callable for the search model monitoring stats method over gRPC. + + Searches Model Monitoring Stats generated within a + given time window. 
+ + Returns: + Callable[[~.SearchModelMonitoringStatsRequest], + ~.SearchModelMonitoringStatsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "search_model_monitoring_stats" not in self._stubs: + self._stubs[ + "search_model_monitoring_stats" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/SearchModelMonitoringStats", + request_serializer=model_monitoring_service.SearchModelMonitoringStatsRequest.serialize, + response_deserializer=model_monitoring_service.SearchModelMonitoringStatsResponse.deserialize, + ) + return self._stubs["search_model_monitoring_stats"] + + @property + def search_model_monitoring_alerts( + self, + ) -> Callable[ + [model_monitoring_service.SearchModelMonitoringAlertsRequest], + model_monitoring_service.SearchModelMonitoringAlertsResponse, + ]: + r"""Return a callable for the search model monitoring alerts method over gRPC. + + Returns the Model Monitoring alerts. + + Returns: + Callable[[~.SearchModelMonitoringAlertsRequest], + ~.SearchModelMonitoringAlertsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "search_model_monitoring_alerts" not in self._stubs: + self._stubs[ + "search_model_monitoring_alerts" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/SearchModelMonitoringAlerts", + request_serializer=model_monitoring_service.SearchModelMonitoringAlertsRequest.serialize, + response_deserializer=model_monitoring_service.SearchModelMonitoringAlertsResponse.deserialize, + ) + return self._stubs["search_model_monitoring_alerts"] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("ModelMonitoringServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..ceeb91f21c --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/grpc_asyncio.py @@ -0,0 +1,800 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers_async
+from google.api_core import operations_v1
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import model_monitor
+from google.cloud.aiplatform_v1beta1.types import model_monitoring_job
+from google.cloud.aiplatform_v1beta1.types import (
+    model_monitoring_job as gca_model_monitoring_job,
+)
+from google.cloud.aiplatform_v1beta1.types import model_monitoring_service
+from google.cloud.location import locations_pb2  # type: ignore
+from google.iam.v1 import iam_policy_pb2  # type: ignore
+from google.iam.v1 import policy_pb2  # type: ignore
+from google.longrunning import operations_pb2  # type: ignore
+from .base import ModelMonitoringServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import ModelMonitoringServiceGrpcTransport
+
+
+class ModelMonitoringServiceGrpcAsyncIOTransport(ModelMonitoringServiceTransport):
+    """gRPC AsyncIO backend transport for ModelMonitoringService.
+
+    A service for creating and managing Vertex AI Model monitoring. This
+    includes ``ModelMonitor`` resources, ``ModelMonitoringJob``
+    resources.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. 
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. 
The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_model_monitor( + self, + ) -> Callable[ + [model_monitoring_service.CreateModelMonitorRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create model monitor method over gRPC. + + Creates a ModelMonitor. + + Returns: + Callable[[~.CreateModelMonitorRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_model_monitor" not in self._stubs: + self._stubs["create_model_monitor"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/CreateModelMonitor", + request_serializer=model_monitoring_service.CreateModelMonitorRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_model_monitor"] + + @property + def update_model_monitor( + self, + ) -> Callable[ + [model_monitoring_service.UpdateModelMonitorRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the update model monitor method over gRPC. + + Updates a ModelMonitor. + + Returns: + Callable[[~.UpdateModelMonitorRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_model_monitor" not in self._stubs: + self._stubs["update_model_monitor"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/UpdateModelMonitor", + request_serializer=model_monitoring_service.UpdateModelMonitorRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_model_monitor"] + + @property + def get_model_monitor( + self, + ) -> Callable[ + [model_monitoring_service.GetModelMonitorRequest], + Awaitable[model_monitor.ModelMonitor], + ]: + r"""Return a callable for the get model monitor method over gRPC. + + Gets a ModelMonitor. + + Returns: + Callable[[~.GetModelMonitorRequest], + Awaitable[~.ModelMonitor]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_model_monitor" not in self._stubs: + self._stubs["get_model_monitor"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/GetModelMonitor", + request_serializer=model_monitoring_service.GetModelMonitorRequest.serialize, + response_deserializer=model_monitor.ModelMonitor.deserialize, + ) + return self._stubs["get_model_monitor"] + + @property + def list_model_monitors( + self, + ) -> Callable[ + [model_monitoring_service.ListModelMonitorsRequest], + Awaitable[model_monitoring_service.ListModelMonitorsResponse], + ]: + r"""Return a callable for the list model monitors method over gRPC. + + Lists ModelMonitors in a Location. + + Returns: + Callable[[~.ListModelMonitorsRequest], + Awaitable[~.ListModelMonitorsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_model_monitors" not in self._stubs: + self._stubs["list_model_monitors"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/ListModelMonitors", + request_serializer=model_monitoring_service.ListModelMonitorsRequest.serialize, + response_deserializer=model_monitoring_service.ListModelMonitorsResponse.deserialize, + ) + return self._stubs["list_model_monitors"] + + @property + def delete_model_monitor( + self, + ) -> Callable[ + [model_monitoring_service.DeleteModelMonitorRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the delete model monitor method over gRPC. + + Deletes a ModelMonitor. + + Returns: + Callable[[~.DeleteModelMonitorRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_model_monitor" not in self._stubs: + self._stubs["delete_model_monitor"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/DeleteModelMonitor", + request_serializer=model_monitoring_service.DeleteModelMonitorRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_model_monitor"] + + @property + def create_model_monitoring_job( + self, + ) -> Callable[ + [model_monitoring_service.CreateModelMonitoringJobRequest], + Awaitable[gca_model_monitoring_job.ModelMonitoringJob], + ]: + r"""Return a callable for the create model monitoring job method over gRPC. + + Creates a ModelMonitoringJob. + + Returns: + Callable[[~.CreateModelMonitoringJobRequest], + Awaitable[~.ModelMonitoringJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_model_monitoring_job" not in self._stubs: + self._stubs["create_model_monitoring_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/CreateModelMonitoringJob", + request_serializer=model_monitoring_service.CreateModelMonitoringJobRequest.serialize, + response_deserializer=gca_model_monitoring_job.ModelMonitoringJob.deserialize, + ) + return self._stubs["create_model_monitoring_job"] + + @property + def get_model_monitoring_job( + self, + ) -> Callable[ + [model_monitoring_service.GetModelMonitoringJobRequest], + Awaitable[model_monitoring_job.ModelMonitoringJob], + ]: + r"""Return a callable for the get model monitoring job method over gRPC. 
+
+        Gets a ModelMonitoringJob.
+
+        Returns:
+            Callable[[~.GetModelMonitoringJobRequest],
+                    Awaitable[~.ModelMonitoringJob]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_model_monitoring_job" not in self._stubs:
+            self._stubs["get_model_monitoring_job"] = self.grpc_channel.unary_unary(
+                "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/GetModelMonitoringJob",
+                request_serializer=model_monitoring_service.GetModelMonitoringJobRequest.serialize,
+                response_deserializer=model_monitoring_job.ModelMonitoringJob.deserialize,
+            )
+        return self._stubs["get_model_monitoring_job"]
+
+    @property
+    def list_model_monitoring_jobs(
+        self,
+    ) -> Callable[
+        [model_monitoring_service.ListModelMonitoringJobsRequest],
+        Awaitable[model_monitoring_service.ListModelMonitoringJobsResponse],
+    ]:
+        r"""Return a callable for the list model monitoring jobs method over gRPC.
+
+        Lists ModelMonitoringJobs. Callers may choose to read across
+        multiple Monitors as per
+        `AIP-159 <https://google.aip.dev/159>`__ by using '-' (the
+        hyphen or dash character) as a wildcard character instead of
+        modelMonitor id in the parent. Format
+        ``projects/{project_id}/locations/{location}/modelMonitors/-/modelMonitoringJobs``
+
+        Returns:
+            Callable[[~.ListModelMonitoringJobsRequest],
+                    Awaitable[~.ListModelMonitoringJobsResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "list_model_monitoring_jobs" not in self._stubs: + self._stubs["list_model_monitoring_jobs"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/ListModelMonitoringJobs", + request_serializer=model_monitoring_service.ListModelMonitoringJobsRequest.serialize, + response_deserializer=model_monitoring_service.ListModelMonitoringJobsResponse.deserialize, + ) + return self._stubs["list_model_monitoring_jobs"] + + @property + def delete_model_monitoring_job( + self, + ) -> Callable[ + [model_monitoring_service.DeleteModelMonitoringJobRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the delete model monitoring job method over gRPC. + + Deletes a ModelMonitoringJob. + + Returns: + Callable[[~.DeleteModelMonitoringJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_model_monitoring_job" not in self._stubs: + self._stubs["delete_model_monitoring_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/DeleteModelMonitoringJob", + request_serializer=model_monitoring_service.DeleteModelMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_model_monitoring_job"] + + @property + def search_model_monitoring_stats( + self, + ) -> Callable[ + [model_monitoring_service.SearchModelMonitoringStatsRequest], + Awaitable[model_monitoring_service.SearchModelMonitoringStatsResponse], + ]: + r"""Return a callable for the search model monitoring stats method over gRPC. + + Searches Model Monitoring Stats generated within a + given time window. 
+ + Returns: + Callable[[~.SearchModelMonitoringStatsRequest], + Awaitable[~.SearchModelMonitoringStatsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "search_model_monitoring_stats" not in self._stubs: + self._stubs[ + "search_model_monitoring_stats" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/SearchModelMonitoringStats", + request_serializer=model_monitoring_service.SearchModelMonitoringStatsRequest.serialize, + response_deserializer=model_monitoring_service.SearchModelMonitoringStatsResponse.deserialize, + ) + return self._stubs["search_model_monitoring_stats"] + + @property + def search_model_monitoring_alerts( + self, + ) -> Callable[ + [model_monitoring_service.SearchModelMonitoringAlertsRequest], + Awaitable[model_monitoring_service.SearchModelMonitoringAlertsResponse], + ]: + r"""Return a callable for the search model monitoring alerts method over gRPC. + + Returns the Model Monitoring alerts. + + Returns: + Callable[[~.SearchModelMonitoringAlertsRequest], + Awaitable[~.SearchModelMonitoringAlertsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "search_model_monitoring_alerts" not in self._stubs: + self._stubs[ + "search_model_monitoring_alerts" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ModelMonitoringService/SearchModelMonitoringAlerts", + request_serializer=model_monitoring_service.SearchModelMonitoringAlertsRequest.serialize, + response_deserializer=model_monitoring_service.SearchModelMonitoringAlertsResponse.deserialize, + ) + return self._stubs["search_model_monitoring_alerts"] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("ModelMonitoringServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest.py new file mode 100644 index 0000000000..1b03c8763b --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/model_monitoring_service/transports/rest.py @@ -0,0 +1,6487 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.api_core import operations_v1 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import model_monitor +from google.cloud.aiplatform_v1beta1.types import model_monitoring_job +from google.cloud.aiplatform_v1beta1.types import ( + model_monitoring_job as gca_model_monitoring_job, +) +from google.cloud.aiplatform_v1beta1.types import model_monitoring_service +from google.longrunning import operations_pb2 # type: ignore + +from .base import ( + ModelMonitoringServiceTransport, + DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, +) + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class ModelMonitoringServiceRestInterceptor: + """Interceptor for ModelMonitoringService. 
+ + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the ModelMonitoringServiceRestTransport. + + .. code-block:: python + class MyCustomModelMonitoringServiceInterceptor(ModelMonitoringServiceRestInterceptor): + def pre_create_model_monitor(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_model_monitor(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_model_monitoring_job(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_model_monitoring_job(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_model_monitor(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_model_monitor(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_model_monitoring_job(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_model_monitoring_job(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_model_monitor(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_model_monitor(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_model_monitoring_job(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_model_monitoring_job(self, response): + 
logging.log(f"Received response: {response}") + return response + + def pre_list_model_monitoring_jobs(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_model_monitoring_jobs(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_model_monitors(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_model_monitors(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_search_model_monitoring_alerts(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_search_model_monitoring_alerts(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_search_model_monitoring_stats(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_search_model_monitoring_stats(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_model_monitor(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_model_monitor(self, response): + logging.log(f"Received response: {response}") + return response + + transport = ModelMonitoringServiceRestTransport(interceptor=MyCustomModelMonitoringServiceInterceptor()) + client = ModelMonitoringServiceClient(transport=transport) + + + """ + + def pre_create_model_monitor( + self, + request: model_monitoring_service.CreateModelMonitorRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + model_monitoring_service.CreateModelMonitorRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for create_model_monitor + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. 
+ """ + return request, metadata + + def post_create_model_monitor( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_model_monitor + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. + """ + return response + + def pre_create_model_monitoring_job( + self, + request: model_monitoring_service.CreateModelMonitoringJobRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + model_monitoring_service.CreateModelMonitoringJobRequest, + Sequence[Tuple[str, str]], + ]: + """Pre-rpc interceptor for create_model_monitoring_job + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. + """ + return request, metadata + + def post_create_model_monitoring_job( + self, response: gca_model_monitoring_job.ModelMonitoringJob + ) -> gca_model_monitoring_job.ModelMonitoringJob: + """Post-rpc interceptor for create_model_monitoring_job + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. + """ + return response + + def pre_delete_model_monitor( + self, + request: model_monitoring_service.DeleteModelMonitorRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + model_monitoring_service.DeleteModelMonitorRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for delete_model_monitor + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. 
+ """ + return request, metadata + + def post_delete_model_monitor( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_model_monitor + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. + """ + return response + + def pre_delete_model_monitoring_job( + self, + request: model_monitoring_service.DeleteModelMonitoringJobRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + model_monitoring_service.DeleteModelMonitoringJobRequest, + Sequence[Tuple[str, str]], + ]: + """Pre-rpc interceptor for delete_model_monitoring_job + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. + """ + return request, metadata + + def post_delete_model_monitoring_job( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_model_monitoring_job + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. + """ + return response + + def pre_get_model_monitor( + self, + request: model_monitoring_service.GetModelMonitorRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + model_monitoring_service.GetModelMonitorRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for get_model_monitor + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. + """ + return request, metadata + + def post_get_model_monitor( + self, response: model_monitor.ModelMonitor + ) -> model_monitor.ModelMonitor: + """Post-rpc interceptor for get_model_monitor + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. 
+ """ + return response + + def pre_get_model_monitoring_job( + self, + request: model_monitoring_service.GetModelMonitoringJobRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + model_monitoring_service.GetModelMonitoringJobRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for get_model_monitoring_job + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. + """ + return request, metadata + + def post_get_model_monitoring_job( + self, response: model_monitoring_job.ModelMonitoringJob + ) -> model_monitoring_job.ModelMonitoringJob: + """Post-rpc interceptor for get_model_monitoring_job + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. + """ + return response + + def pre_list_model_monitoring_jobs( + self, + request: model_monitoring_service.ListModelMonitoringJobsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + model_monitoring_service.ListModelMonitoringJobsRequest, + Sequence[Tuple[str, str]], + ]: + """Pre-rpc interceptor for list_model_monitoring_jobs + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. + """ + return request, metadata + + def post_list_model_monitoring_jobs( + self, response: model_monitoring_service.ListModelMonitoringJobsResponse + ) -> model_monitoring_service.ListModelMonitoringJobsResponse: + """Post-rpc interceptor for list_model_monitoring_jobs + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. 
+ """ + return response + + def pre_list_model_monitors( + self, + request: model_monitoring_service.ListModelMonitorsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + model_monitoring_service.ListModelMonitorsRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for list_model_monitors + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. + """ + return request, metadata + + def post_list_model_monitors( + self, response: model_monitoring_service.ListModelMonitorsResponse + ) -> model_monitoring_service.ListModelMonitorsResponse: + """Post-rpc interceptor for list_model_monitors + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. + """ + return response + + def pre_search_model_monitoring_alerts( + self, + request: model_monitoring_service.SearchModelMonitoringAlertsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + model_monitoring_service.SearchModelMonitoringAlertsRequest, + Sequence[Tuple[str, str]], + ]: + """Pre-rpc interceptor for search_model_monitoring_alerts + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. + """ + return request, metadata + + def post_search_model_monitoring_alerts( + self, response: model_monitoring_service.SearchModelMonitoringAlertsResponse + ) -> model_monitoring_service.SearchModelMonitoringAlertsResponse: + """Post-rpc interceptor for search_model_monitoring_alerts + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. 
+ """ + return response + + def pre_search_model_monitoring_stats( + self, + request: model_monitoring_service.SearchModelMonitoringStatsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + model_monitoring_service.SearchModelMonitoringStatsRequest, + Sequence[Tuple[str, str]], + ]: + """Pre-rpc interceptor for search_model_monitoring_stats + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. + """ + return request, metadata + + def post_search_model_monitoring_stats( + self, response: model_monitoring_service.SearchModelMonitoringStatsResponse + ) -> model_monitoring_service.SearchModelMonitoringStatsResponse: + """Post-rpc interceptor for search_model_monitoring_stats + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. + """ + return response + + def pre_update_model_monitor( + self, + request: model_monitoring_service.UpdateModelMonitorRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + model_monitoring_service.UpdateModelMonitorRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for update_model_monitor + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. + """ + return request, metadata + + def post_update_model_monitor( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for update_model_monitor + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. 
+ """ + return response + + def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.GetLocationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. + """ + return request, metadata + + def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. + """ + return response + + def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.ListLocationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. + """ + return request, metadata + + def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. + """ + return response + + def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. 
+ """ + return request, metadata + + def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. + """ + return response + + def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. + """ + return request, metadata + + def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. + """ + return response + + def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. + """ + return request, metadata + + def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. 
+ """ + return response + + def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.CancelOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. + """ + return request, metadata + + def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. + """ + return response + + def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. + """ + return request, metadata + + def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. + """ + return response + + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. 
+ """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. + """ + return response + + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.ListOperationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. + """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. + """ + return response + + def pre_wait_operation( + self, + request: operations_pb2.WaitOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.WaitOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for wait_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ModelMonitoringService server. + """ + return request, metadata + + def post_wait_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for wait_operation + + Override in a subclass to manipulate the response + after it is returned by the ModelMonitoringService server but before + it is returned to user code. 
+ """ + return response + + +@dataclasses.dataclass +class ModelMonitoringServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: ModelMonitoringServiceRestInterceptor + + +class ModelMonitoringServiceRestTransport(ModelMonitoringServiceTransport): + """REST backend transport for ModelMonitoringService. + + A service for creating and managing Vertex AI Model moitoring. This + includes ``ModelMonitor`` resources, ``ModelMonitoringJob`` + resources. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via an issue in this + library's source repository. Thank you! + """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[ModelMonitoringServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via a GitHub issue in + this library's repository. Thank you! + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
+        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+        # credentials object
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(
+                f"Unexpected hostname structure: {host}"
+            )  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience,
+        )
+        self._session = AuthorizedSession(
+            self._credentials, default_host=self.DEFAULT_HOST
+        )
+        self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None
+        if client_cert_source_for_mtls:
+            self._session.configure_mtls_channel(client_cert_source_for_mtls)
+        self._interceptor = interceptor or ModelMonitoringServiceRestInterceptor()
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def operations_client(self) -> operations_v1.AbstractOperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Only create a new client if we do not already have one.
+ if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + "google.longrunning.Operations.CancelOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { 
+ "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:cancel", + }, 
+ { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + 
"uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { 
+ "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + ], + "google.longrunning.Operations.DeleteOperation": [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + 
"uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", 
+ }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + ], + "google.longrunning.Operations.GetOperation": [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, 
+ { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + ], + "google.longrunning.Operations.ListOperations": [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + 
"method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + 
"method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", + }, + ], + "google.longrunning.Operations.WaitOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + 
"uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + 
"method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v1beta1", + ) + + self._operations_client = operations_v1.AbstractOperationsClient( + transport=rest_transport + ) + + # Return the client from cache. 
+ return self._operations_client + + class _CreateModelMonitor(ModelMonitoringServiceRestStub): + def __hash__(self): + return hash("CreateModelMonitor") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: model_monitoring_service.CreateModelMonitorRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the create model monitor method over HTTP. + + Args: + request (~.model_monitoring_service.CreateModelMonitorRequest): + The request object. Request message for + [ModelMonitoringService.CreateModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.CreateModelMonitor]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{parent=projects/*/locations/*}/modelMonitors", + "body": "model_monitor", + }, + ] + request, metadata = self._interceptor.pre_create_model_monitor( + request, metadata + ) + pb_request = model_monitoring_service.CreateModelMonitorRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_model_monitor(resp) + return resp + + class _CreateModelMonitoringJob(ModelMonitoringServiceRestStub): + def __hash__(self): + return hash("CreateModelMonitoringJob") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: model_monitoring_service.CreateModelMonitoringJobRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model_monitoring_job.ModelMonitoringJob: + r"""Call the create model monitoring + job method over HTTP. + + Args: + request (~.model_monitoring_service.CreateModelMonitoringJobRequest): + The request object. Request message for + [ModelMonitoringService.CreateModelMonitoringJob][google.cloud.aiplatform.v1beta1.ModelMonitoringService.CreateModelMonitoringJob]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_model_monitoring_job.ModelMonitoringJob: + Represents a model monitoring job + that analyze dataset using different + monitoring algorithm. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{parent=projects/*/locations/*/modelMonitors/*}/modelMonitoringJobs", + "body": "model_monitoring_job", + }, + ] + request, metadata = self._interceptor.pre_create_model_monitoring_job( + request, metadata + ) + pb_request = model_monitoring_service.CreateModelMonitoringJobRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gca_model_monitoring_job.ModelMonitoringJob() + pb_resp = gca_model_monitoring_job.ModelMonitoringJob.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_model_monitoring_job(resp) + return resp + + class _DeleteModelMonitor(ModelMonitoringServiceRestStub): + def __hash__(self): + return hash("DeleteModelMonitor") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: model_monitoring_service.DeleteModelMonitorRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete model monitor method over HTTP. + + Args: + request (~.model_monitoring_service.DeleteModelMonitorRequest): + The request object. Request message for + [ModelMonitoringService.DeleteModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.DeleteModelMonitor]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_model_monitor( + request, metadata + ) + pb_request = model_monitoring_service.DeleteModelMonitorRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_model_monitor(resp) + return resp + + class _DeleteModelMonitoringJob(ModelMonitoringServiceRestStub): + def __hash__(self): + return hash("DeleteModelMonitoringJob") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: model_monitoring_service.DeleteModelMonitoringJobRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete model monitoring + job method over HTTP. + + Args: + request (~.model_monitoring_service.DeleteModelMonitoringJobRequest): + The request object. Request message for + [ModelMonitoringService.DeleteModelMonitoringJob][google.cloud.aiplatform.v1beta1.ModelMonitoringService.DeleteModelMonitoringJob]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/modelMonitoringJobs/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_model_monitoring_job( + request, metadata + ) + pb_request = model_monitoring_service.DeleteModelMonitoringJobRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_model_monitoring_job(resp) + return resp + + class _GetModelMonitor(ModelMonitoringServiceRestStub): + def __hash__(self): + return hash("GetModelMonitor") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: model_monitoring_service.GetModelMonitorRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_monitor.ModelMonitor: + r"""Call the get model monitor method over HTTP. + + Args: + request (~.model_monitoring_service.GetModelMonitorRequest): + The request object. Request message for + [ModelMonitoringService.GetModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.GetModelMonitor]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model_monitor.ModelMonitor: + Vertex AI Model Monitoring Service + serves as a central hub for the analysis + and visualization of data quality and + performance related to models. + ModelMonitor stands as a top level + resource for overseeing your model + monitoring tasks. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*}", + }, + ] + request, metadata = self._interceptor.pre_get_model_monitor( + request, metadata + ) + pb_request = model_monitoring_service.GetModelMonitorRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = model_monitor.ModelMonitor() + pb_resp = model_monitor.ModelMonitor.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_model_monitor(resp) + return resp + + class _GetModelMonitoringJob(ModelMonitoringServiceRestStub): + def __hash__(self): + return hash("GetModelMonitoringJob") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: model_monitoring_service.GetModelMonitoringJobRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_monitoring_job.ModelMonitoringJob: + r"""Call the get model monitoring job method over HTTP. + + Args: + request (~.model_monitoring_service.GetModelMonitoringJobRequest): + The request object. Request message for + [ModelMonitoringService.GetModelMonitoringJob][google.cloud.aiplatform.v1beta1.ModelMonitoringService.GetModelMonitoringJob]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model_monitoring_job.ModelMonitoringJob: + Represents a model monitoring job + that analyze dataset using different + monitoring algorithm. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/modelMonitoringJobs/*}", + }, + ] + request, metadata = self._interceptor.pre_get_model_monitoring_job( + request, metadata + ) + pb_request = model_monitoring_service.GetModelMonitoringJobRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = model_monitoring_job.ModelMonitoringJob() + pb_resp = model_monitoring_job.ModelMonitoringJob.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_model_monitoring_job(resp) + return resp + + class _ListModelMonitoringJobs(ModelMonitoringServiceRestStub): + def __hash__(self): + return hash("ListModelMonitoringJobs") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: model_monitoring_service.ListModelMonitoringJobsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_monitoring_service.ListModelMonitoringJobsResponse: + r"""Call the list model monitoring + jobs method over HTTP. + + Args: + request (~.model_monitoring_service.ListModelMonitoringJobsRequest): + The request object. Request message for + [ModelMonitoringService.ListModelMonitoringJobs][google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitoringJobs]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model_monitoring_service.ListModelMonitoringJobsResponse: + Response message for + [ModelMonitoringService.ListModelMonitoringJobs][google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitoringJobs]. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta1/{parent=projects/*/locations/*/modelMonitors/*}/modelMonitoringJobs", + }, + ] + request, metadata = self._interceptor.pre_list_model_monitoring_jobs( + request, metadata + ) + pb_request = model_monitoring_service.ListModelMonitoringJobsRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = model_monitoring_service.ListModelMonitoringJobsResponse() + pb_resp = model_monitoring_service.ListModelMonitoringJobsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_model_monitoring_jobs(resp) + return resp + + class _ListModelMonitors(ModelMonitoringServiceRestStub): + def __hash__(self): + return hash("ListModelMonitors") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: model_monitoring_service.ListModelMonitorsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_monitoring_service.ListModelMonitorsResponse: + r"""Call the list model monitors method over HTTP. + + Args: + request (~.model_monitoring_service.ListModelMonitorsRequest): + The request object. Request message for + [ModelMonitoringService.ListModelMonitors][google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitors]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.model_monitoring_service.ListModelMonitorsResponse: + Response message for + [ModelMonitoringService.ListModelMonitors][google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitors] + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta1/{parent=projects/*/locations/*}/modelMonitors", + }, + ] + request, metadata = self._interceptor.pre_list_model_monitors( + request, metadata + ) + pb_request = model_monitoring_service.ListModelMonitorsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = model_monitoring_service.ListModelMonitorsResponse() + pb_resp = model_monitoring_service.ListModelMonitorsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_model_monitors(resp) + return resp + + class _SearchModelMonitoringAlerts(ModelMonitoringServiceRestStub): + def __hash__(self): + return hash("SearchModelMonitoringAlerts") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: model_monitoring_service.SearchModelMonitoringAlertsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_monitoring_service.SearchModelMonitoringAlertsResponse: + r"""Call the search model monitoring + alerts method over HTTP. + + Args: + request (~.model_monitoring_service.SearchModelMonitoringAlertsRequest): + The request object. Request message for + [ModelMonitoringService.SearchModelMonitoringAlerts][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringAlerts]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model_monitoring_service.SearchModelMonitoringAlertsResponse: + Response message for + [ModelMonitoringService.SearchModelMonitoringAlerts][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringAlerts]. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{model_monitor=projects/*/locations/*/modelMonitors/*}:searchModelMonitoringAlerts", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_search_model_monitoring_alerts( + request, metadata + ) + pb_request = model_monitoring_service.SearchModelMonitoringAlertsRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = model_monitoring_service.SearchModelMonitoringAlertsResponse() + pb_resp = model_monitoring_service.SearchModelMonitoringAlertsResponse.pb( + resp + ) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_search_model_monitoring_alerts(resp) + return resp + + class _SearchModelMonitoringStats(ModelMonitoringServiceRestStub): + def __hash__(self): + return hash("SearchModelMonitoringStats") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: model_monitoring_service.SearchModelMonitoringStatsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_monitoring_service.SearchModelMonitoringStatsResponse: + r"""Call the search model monitoring + stats method over HTTP. + + Args: + request (~.model_monitoring_service.SearchModelMonitoringStatsRequest): + The request object. Request message for + [ModelMonitoringService.SearchModelMonitoringStats][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringStats]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.model_monitoring_service.SearchModelMonitoringStatsResponse: + Response message for + [ModelMonitoringService.SearchModelMonitoringStats][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringStats]. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{model_monitor=projects/*/locations/*/modelMonitors/*}:searchModelMonitoringStats", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_search_model_monitoring_stats( + request, metadata + ) + pb_request = model_monitoring_service.SearchModelMonitoringStatsRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = model_monitoring_service.SearchModelMonitoringStatsResponse() + pb_resp = model_monitoring_service.SearchModelMonitoringStatsResponse.pb( + resp + ) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_search_model_monitoring_stats(resp) + return resp + + class _UpdateModelMonitor(ModelMonitoringServiceRestStub): + def __hash__(self): + return hash("UpdateModelMonitor") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: model_monitoring_service.UpdateModelMonitorRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the update model monitor method over HTTP. + + Args: + request (~.model_monitoring_service.UpdateModelMonitorRequest): + The request object. Request message for + [ModelMonitoringService.UpdateModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.UpdateModelMonitor]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v1beta1/{model_monitor.name=projects/*/locations/*/modelMonitors/*}", + "body": "model_monitor", + }, + ] + request, metadata = self._interceptor.pre_update_model_monitor( + request, metadata + ) + pb_request = model_monitoring_service.UpdateModelMonitorRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_model_monitor(resp) + return resp + + @property + def create_model_monitor( + self, + ) -> Callable[ + [model_monitoring_service.CreateModelMonitorRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._CreateModelMonitor(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_model_monitoring_job( + self, + ) -> Callable[ + [model_monitoring_service.CreateModelMonitoringJobRequest], + gca_model_monitoring_job.ModelMonitoringJob, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateModelMonitoringJob(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_model_monitor( + self, + ) -> Callable[ + [model_monitoring_service.DeleteModelMonitorRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteModelMonitor(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_model_monitoring_job( + self, + ) -> Callable[ + [model_monitoring_service.DeleteModelMonitoringJobRequest], + operations_pb2.Operation, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteModelMonitoringJob(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_model_monitor( + self, + ) -> Callable[ + [model_monitoring_service.GetModelMonitorRequest], model_monitor.ModelMonitor + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._GetModelMonitor(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_model_monitoring_job( + self, + ) -> Callable[ + [model_monitoring_service.GetModelMonitoringJobRequest], + model_monitoring_job.ModelMonitoringJob, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetModelMonitoringJob(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_model_monitoring_jobs( + self, + ) -> Callable[ + [model_monitoring_service.ListModelMonitoringJobsRequest], + model_monitoring_service.ListModelMonitoringJobsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListModelMonitoringJobs(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_model_monitors( + self, + ) -> Callable[ + [model_monitoring_service.ListModelMonitorsRequest], + model_monitoring_service.ListModelMonitorsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListModelMonitors(self._session, self._host, self._interceptor) # type: ignore + + @property + def search_model_monitoring_alerts( + self, + ) -> Callable[ + [model_monitoring_service.SearchModelMonitoringAlertsRequest], + model_monitoring_service.SearchModelMonitoringAlertsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._SearchModelMonitoringAlerts(self._session, self._host, self._interceptor) # type: ignore + + @property + def search_model_monitoring_stats( + self, + ) -> Callable[ + [model_monitoring_service.SearchModelMonitoringStatsRequest], + model_monitoring_service.SearchModelMonitoringStatsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._SearchModelMonitoringStats(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_model_monitor( + self, + ) -> Callable[ + [model_monitoring_service.UpdateModelMonitorRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateModelMonitor(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore + + class _GetLocation(ModelMonitoringServiceRestStub): + def __call__( + self, + request: locations_pb2.GetLocationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + + r"""Call the get location method over HTTP. + + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.Location: Response from GetLocation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_location(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.Location() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_location(resp) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations(ModelMonitoringServiceRestStub): + def __call__( + self, + request: locations_pb2.ListLocationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + + r"""Call the list locations method over HTTP. + + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*}/locations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*}/locations", + }, + ] + + request, metadata = self._interceptor.pre_list_locations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_locations(resp) + return resp + + @property + def get_iam_policy(self): + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _GetIamPolicy(ModelMonitoringServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the get iam policy method over HTTP. 
+ + Args: + request (iam_policy_pb2.GetIamPolicyRequest): + The request object for GetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from GetIamPolicy method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + 
"method": "post", + "uri": "/ui/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + ] + + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_iam_policy(resp) + return resp + + @property + def set_iam_policy(self): + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _SetIamPolicy(ModelMonitoringServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the set iam policy method over HTTP. 
+ + Args: + request (iam_policy_pb2.SetIamPolicyRequest): + The request object for SetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from SetIamPolicy method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": 
"/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + ] + + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_set_iam_policy(resp) + return resp + + @property + def test_iam_permissions(self): + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + class _TestIamPermissions(ModelMonitoringServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + + r"""Call the test iam permissions method over HTTP. + + Args: + request (iam_policy_pb2.TestIamPermissionsRequest): + The request object for TestIamPermissions method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + iam_policy_pb2.TestIamPermissionsResponse: Response from TestIamPermissions method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + ] + + request, metadata = self._interceptor.pre_test_iam_permissions( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, 
**request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = iam_policy_pb2.TestIamPermissionsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_test_iam_permissions(resp) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation(ModelMonitoringServiceRestStub): + def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:cancel", + }, 
+ { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + 
"uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { 
+ "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + ] + + request, metadata = self._interceptor.pre_cancel_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation(ModelMonitoringServiceRestStub): + def __call__( + self, + request: operations_pb2.DeleteOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + 
}, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_delete_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation(ModelMonitoringServiceRestStub): + def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from GetOperation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { 
+ "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_operation(resp) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations(ModelMonitoringServiceRestStub): + def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/exampleStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + 
"method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", + }, + ] + + request, metadata = self._interceptor.pre_list_operations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_operations(resp) + return resp + + @property + def wait_operation(self): + return self._WaitOperation(self._session, self._host, self._interceptor) # type: ignore + + class _WaitOperation(ModelMonitoringServiceRestStub): + def __call__( + self, + request: operations_pb2.WaitOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the wait operation method over HTTP. 
+ + Args: + request (operations_pb2.WaitOperationRequest): + The request object for WaitOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from WaitOperation method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": 
"post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ] + + request, metadata = self._interceptor.pre_wait_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + 
headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_wait_operation(resp) + return resp + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("ModelMonitoringServiceRestTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/schedule_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/schedule_service/async_client.py index f83959b393..7c5a077e74 100644 --- a/google/cloud/aiplatform_v1beta1/services/schedule_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/schedule_service/async_client.py @@ -45,6 +45,7 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1beta1.services.schedule_service import pagers +from google.cloud.aiplatform_v1beta1.types import model_monitoring_service from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import schedule @@ -79,12 +80,32 @@ class ScheduleServiceAsyncClient: artifact_path = staticmethod(ScheduleServiceClient.artifact_path) parse_artifact_path = staticmethod(ScheduleServiceClient.parse_artifact_path) + batch_prediction_job_path = staticmethod( + ScheduleServiceClient.batch_prediction_job_path + ) + parse_batch_prediction_job_path = staticmethod( + 
ScheduleServiceClient.parse_batch_prediction_job_path + ) context_path = staticmethod(ScheduleServiceClient.context_path) parse_context_path = staticmethod(ScheduleServiceClient.parse_context_path) custom_job_path = staticmethod(ScheduleServiceClient.custom_job_path) parse_custom_job_path = staticmethod(ScheduleServiceClient.parse_custom_job_path) + dataset_path = staticmethod(ScheduleServiceClient.dataset_path) + parse_dataset_path = staticmethod(ScheduleServiceClient.parse_dataset_path) + endpoint_path = staticmethod(ScheduleServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(ScheduleServiceClient.parse_endpoint_path) execution_path = staticmethod(ScheduleServiceClient.execution_path) parse_execution_path = staticmethod(ScheduleServiceClient.parse_execution_path) + model_monitor_path = staticmethod(ScheduleServiceClient.model_monitor_path) + parse_model_monitor_path = staticmethod( + ScheduleServiceClient.parse_model_monitor_path + ) + model_monitoring_job_path = staticmethod( + ScheduleServiceClient.model_monitoring_job_path + ) + parse_model_monitoring_job_path = staticmethod( + ScheduleServiceClient.parse_model_monitoring_job_path + ) network_path = staticmethod(ScheduleServiceClient.network_path) parse_network_path = staticmethod(ScheduleServiceClient.parse_network_path) pipeline_job_path = staticmethod(ScheduleServiceClient.pipeline_job_path) diff --git a/google/cloud/aiplatform_v1beta1/services/schedule_service/client.py b/google/cloud/aiplatform_v1beta1/services/schedule_service/client.py index 8f529775db..f6ff65c41d 100644 --- a/google/cloud/aiplatform_v1beta1/services/schedule_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/schedule_service/client.py @@ -50,6 +50,7 @@ from google.api_core import operation as gac_operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.aiplatform_v1beta1.services.schedule_service import pagers +from google.cloud.aiplatform_v1beta1.types import 
model_monitoring_service from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.cloud.aiplatform_v1beta1.types import schedule @@ -221,6 +222,28 @@ def parse_artifact_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def batch_prediction_job_path( + project: str, + location: str, + batch_prediction_job: str, + ) -> str: + """Returns a fully-qualified batch_prediction_job string.""" + return "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format( + project=project, + location=location, + batch_prediction_job=batch_prediction_job, + ) + + @staticmethod + def parse_batch_prediction_job_path(path: str) -> Dict[str, str]: + """Parses a batch_prediction_job path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/batchPredictionJobs/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def context_path( project: str, @@ -267,6 +290,50 @@ def parse_custom_job_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def dataset_path( + project: str, + location: str, + dataset: str, + ) -> str: + """Returns a fully-qualified dataset string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, + location=location, + dataset=dataset, + ) + + @staticmethod + def parse_dataset_path(path: str) -> Dict[str, str]: + """Parses a dataset path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def endpoint_path( + project: str, + location: str, + endpoint: str, + ) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format( + project=project, + location=location, + 
endpoint=endpoint, + ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str, str]: + """Parses a endpoint path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def execution_path( project: str, @@ -291,6 +358,52 @@ def parse_execution_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def model_monitor_path( + project: str, + location: str, + model_monitor: str, + ) -> str: + """Returns a fully-qualified model_monitor string.""" + return "projects/{project}/locations/{location}/modelMonitors/{model_monitor}".format( + project=project, + location=location, + model_monitor=model_monitor, + ) + + @staticmethod + def parse_model_monitor_path(path: str) -> Dict[str, str]: + """Parses a model_monitor path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/modelMonitors/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def model_monitoring_job_path( + project: str, + location: str, + model_monitor: str, + model_monitoring_job: str, + ) -> str: + """Returns a fully-qualified model_monitoring_job string.""" + return "projects/{project}/locations/{location}/modelMonitors/{model_monitor}/modelMonitoringJobs/{model_monitoring_job}".format( + project=project, + location=location, + model_monitor=model_monitor, + model_monitoring_job=model_monitoring_job, + ) + + @staticmethod + def parse_model_monitoring_job_path(path: str) -> Dict[str, str]: + """Parses a model_monitoring_job path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/modelMonitors/(?P.+?)/modelMonitoringJobs/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def network_path( project: str, diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index 
230f2b0478..d16c74107a 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -686,6 +686,10 @@ ListPublisherModelsResponse, PublisherModelView, ) +from .model_monitor import ( + ModelMonitor, + ModelMonitoringSchema, +) from .model_monitoring import ( ModelMonitoringAlertConfig, ModelMonitoringConfig, @@ -693,6 +697,47 @@ SamplingStrategy, ThresholdConfig, ) +from .model_monitoring_alert import ( + ModelMonitoringAlert, + ModelMonitoringAlertCondition, + ModelMonitoringAnomaly, +) +from .model_monitoring_job import ( + ModelMonitoringJob, + ModelMonitoringJobExecutionDetail, +) +from .model_monitoring_service import ( + CreateModelMonitoringJobRequest, + CreateModelMonitorOperationMetadata, + CreateModelMonitorRequest, + DeleteModelMonitoringJobRequest, + DeleteModelMonitorRequest, + GetModelMonitoringJobRequest, + GetModelMonitorRequest, + ListModelMonitoringJobsRequest, + ListModelMonitoringJobsResponse, + ListModelMonitorsRequest, + ListModelMonitorsResponse, + SearchModelMonitoringAlertsRequest, + SearchModelMonitoringAlertsResponse, + SearchModelMonitoringStatsRequest, + SearchModelMonitoringStatsResponse, + UpdateModelMonitorOperationMetadata, + UpdateModelMonitorRequest, +) +from .model_monitoring_spec import ( + ModelMonitoringInput, + ModelMonitoringNotificationSpec, + ModelMonitoringObjectiveSpec, + ModelMonitoringOutputSpec, + ModelMonitoringSpec, +) +from .model_monitoring_stats import ( + ModelMonitoringStats, + ModelMonitoringStatsDataPoint, + ModelMonitoringTabularStats, + SearchModelMonitoringStatsFilter, +) from .model_service import ( BatchImportEvaluatedAnnotationsRequest, BatchImportEvaluatedAnnotationsResponse, @@ -1613,11 +1658,44 @@ "ListPublisherModelsRequest", "ListPublisherModelsResponse", "PublisherModelView", + "ModelMonitor", + "ModelMonitoringSchema", "ModelMonitoringAlertConfig", "ModelMonitoringConfig", "ModelMonitoringObjectiveConfig", "SamplingStrategy", 
"ThresholdConfig", + "ModelMonitoringAlert", + "ModelMonitoringAlertCondition", + "ModelMonitoringAnomaly", + "ModelMonitoringJob", + "ModelMonitoringJobExecutionDetail", + "CreateModelMonitoringJobRequest", + "CreateModelMonitorOperationMetadata", + "CreateModelMonitorRequest", + "DeleteModelMonitoringJobRequest", + "DeleteModelMonitorRequest", + "GetModelMonitoringJobRequest", + "GetModelMonitorRequest", + "ListModelMonitoringJobsRequest", + "ListModelMonitoringJobsResponse", + "ListModelMonitorsRequest", + "ListModelMonitorsResponse", + "SearchModelMonitoringAlertsRequest", + "SearchModelMonitoringAlertsResponse", + "SearchModelMonitoringStatsRequest", + "SearchModelMonitoringStatsResponse", + "UpdateModelMonitorOperationMetadata", + "UpdateModelMonitorRequest", + "ModelMonitoringInput", + "ModelMonitoringNotificationSpec", + "ModelMonitoringObjectiveSpec", + "ModelMonitoringOutputSpec", + "ModelMonitoringSpec", + "ModelMonitoringStats", + "ModelMonitoringStatsDataPoint", + "ModelMonitoringTabularStats", + "SearchModelMonitoringStatsFilter", "BatchImportEvaluatedAnnotationsRequest", "BatchImportEvaluatedAnnotationsResponse", "BatchImportModelEvaluationSlicesRequest", diff --git a/google/cloud/aiplatform_v1beta1/types/model_monitor.py b/google/cloud/aiplatform_v1beta1/types/model_monitor.py new file mode 100644 index 0000000000..33ced09c7d --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/model_monitor.py @@ -0,0 +1,295 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import model_monitoring_spec +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "ModelMonitor", + "ModelMonitoringSchema", + }, +) + + +class ModelMonitor(proto.Message): + r"""Vertex AI Model Monitoring Service serves as a central hub + for the analysis and visualization of data quality and + performance related to models. ModelMonitor stands as a top + level resource for overseeing your model monitoring tasks. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + tabular_objective (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveSpec.TabularObjective): + Optional default tabular model monitoring + objective. + + This field is a member of `oneof`_ ``default_objective``. + name (str): + Immutable. Resource name of the ModelMonitor. Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}``. + display_name (str): + The display name of the ModelMonitor. + The name can be up to 128 characters long and + can consist of any UTF-8. + model_monitoring_target (google.cloud.aiplatform_v1beta1.types.ModelMonitor.ModelMonitoringTarget): + The entity that is subject to analysis. + Currently only models in Vertex AI Model + Registry are supported. If you want to analyze + the model which is outside the Vertex AI, you + could register a model in Vertex AI Model + Registry using just a display name. + training_dataset (google.cloud.aiplatform_v1beta1.types.ModelMonitoringInput): + Optional training dataset used to train the + model. 
It can serve as a reference dataset to + identify changes in production. + notification_spec (google.cloud.aiplatform_v1beta1.types.ModelMonitoringNotificationSpec): + Optional default notification spec, it can be + overridden in the ModelMonitoringJob + notification spec. + output_spec (google.cloud.aiplatform_v1beta1.types.ModelMonitoringOutputSpec): + Optional default monitoring metrics/logs + export spec, it can be overridden in the + ModelMonitoringJob output spec. If not + specified, a default Google Cloud Storage bucket + will be created under your project. + explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): + Optional model explanation spec. It is used + for feature attribution monitoring. + model_monitoring_schema (google.cloud.aiplatform_v1beta1.types.ModelMonitoringSchema): + Monitoring Schema is to specify the model's + features, prediction outputs and ground truth + properties. It is used to extract pertinent data + from the dataset and to process features based + on their properties. Make sure that the schema + aligns with your dataset, if it does not, we + will be unable to extract data from the dataset. + It is required for most models, but optional for + Vertex AI AutoML Tables unless the schem + information is not available. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this ModelMonitor + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this ModelMonitor + was updated most recently. + """ + + class ModelMonitoringTarget(proto.Message): + r"""The monitoring target refers to the entity that is subject to + analysis. e.g. Vertex AI Model version. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + vertex_model (google.cloud.aiplatform_v1beta1.types.ModelMonitor.ModelMonitoringTarget.VertexModelSource): + Model in Vertex AI Model Registry. 
+ + This field is a member of `oneof`_ ``source``. + """ + + class VertexModelSource(proto.Message): + r"""Model in Vertex AI Model Registry. + + Attributes: + model (str): + Model resource name. Format: + + projects/{project}/locations/{location}/models/{model}. + model_version_id (str): + Model version id. + """ + + model: str = proto.Field( + proto.STRING, + number=1, + ) + model_version_id: str = proto.Field( + proto.STRING, + number=2, + ) + + vertex_model: "ModelMonitor.ModelMonitoringTarget.VertexModelSource" = ( + proto.Field( + proto.MESSAGE, + number=1, + oneof="source", + message="ModelMonitor.ModelMonitoringTarget.VertexModelSource", + ) + ) + + tabular_objective: model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective = proto.Field( + proto.MESSAGE, + number=11, + oneof="default_objective", + message=model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective, + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + model_monitoring_target: ModelMonitoringTarget = proto.Field( + proto.MESSAGE, + number=3, + message=ModelMonitoringTarget, + ) + training_dataset: model_monitoring_spec.ModelMonitoringInput = proto.Field( + proto.MESSAGE, + number=10, + message=model_monitoring_spec.ModelMonitoringInput, + ) + notification_spec: model_monitoring_spec.ModelMonitoringNotificationSpec = ( + proto.Field( + proto.MESSAGE, + number=12, + message=model_monitoring_spec.ModelMonitoringNotificationSpec, + ) + ) + output_spec: model_monitoring_spec.ModelMonitoringOutputSpec = proto.Field( + proto.MESSAGE, + number=13, + message=model_monitoring_spec.ModelMonitoringOutputSpec, + ) + explanation_spec: explanation.ExplanationSpec = proto.Field( + proto.MESSAGE, + number=16, + message=explanation.ExplanationSpec, + ) + model_monitoring_schema: "ModelMonitoringSchema" = proto.Field( + proto.MESSAGE, + number=9, + message="ModelMonitoringSchema", + ) + create_time: 
timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + + +class ModelMonitoringSchema(proto.Message): + r"""The Model Monitoring Schema definition. + + Attributes: + feature_fields (MutableSequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringSchema.FieldSchema]): + Feature names of the model. Vertex AI will try to match the + features from your dataset as follows: + + - For 'csv' files, the header names are required, and we + will extract the corresponding feature values when the + header names align with the feature names. + - For 'jsonl' files, we will extract the corresponding + feature values if the key names match the feature names. + Note: Nested features are not supported, so please ensure + your features are flattened. Ensure the feature values + are scalar or an array of scalars. + - For 'bigquery' dataset, we will extract the corresponding + feature values if the column names match the feature + names. Note: The column type can be a scalar or an array + of scalars. STRUCT or JSON types are not supported. You + may use SQL queries to select or aggregate the relevant + features from your original table. However, ensure that + the 'schema' of the query results meets our requirements. + - For the Vertex AI Endpoint Request Response Logging table + or Vertex AI Batch Prediction Job results. If the + [instance_type][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.instance_type] + is an array, ensure that the sequence in + [feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields] + matches the order of features in the prediction instance. + We will match the feature with the array in the order + specified in [feature_fields]. 
+ prediction_fields (MutableSequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringSchema.FieldSchema]): + Prediction output names of the model. The requirements are + the same as the + [feature_fields][google.cloud.aiplatform.v1beta1.ModelMonitoringSchema.feature_fields]. + For AutoML Tables, the prediction output name presented in + schema will be: ``predicted_{target_column}``, the + ``target_column`` is the one you specified when you train + the model. For Prediction output drift analysis: + + - AutoML Classification, the distribution of the argmax + label will be analyzed. + - AutoML Regression, the distribution of the value will be + analyzed. + ground_truth_fields (MutableSequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringSchema.FieldSchema]): + Target /ground truth names of the model. + """ + + class FieldSchema(proto.Message): + r"""Schema field definition. + + Attributes: + name (str): + Field name. + data_type (str): + Supported data types are: ``float`` ``integer`` ``boolean`` + ``string`` ``categorical`` + repeated (bool): + Describes if the schema field is an array of + given data type. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + data_type: str = proto.Field( + proto.STRING, + number=2, + ) + repeated: bool = proto.Field( + proto.BOOL, + number=3, + ) + + feature_fields: MutableSequence[FieldSchema] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=FieldSchema, + ) + prediction_fields: MutableSequence[FieldSchema] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=FieldSchema, + ) + ground_truth_fields: MutableSequence[FieldSchema] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=FieldSchema, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/model_monitoring_alert.py b/google/cloud/aiplatform_v1beta1/types/model_monitoring_alert.py new file mode 100644 index 0000000000..5cf969929c --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/model_monitoring_alert.py @@ -0,0 +1,170 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "ModelMonitoringAlertCondition", + "ModelMonitoringAnomaly", + "ModelMonitoringAlert", + }, +) + + +class ModelMonitoringAlertCondition(proto.Message): + r"""Monitoring alert triggered condition. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + threshold (float): + A condition that compares a stats value + against a threshold. Alert will be triggered if + value above the threshold. + + This field is a member of `oneof`_ ``condition``. + """ + + threshold: float = proto.Field( + proto.DOUBLE, + number=1, + oneof="condition", + ) + + +class ModelMonitoringAnomaly(proto.Message): + r"""Represents a single model monitoring anomaly. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + tabular_anomaly (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAnomaly.TabularAnomaly): + Tabular anomaly. + + This field is a member of `oneof`_ ``anomaly``. + model_monitoring_job (str): + Model monitoring job resource name. + algorithm (str): + Algorithm used to calculated the metrics, eg: + jensen_shannon_divergence, l_infinity. + """ + + class TabularAnomaly(proto.Message): + r"""Tabular anomaly details. + + Attributes: + anomaly_uri (str): + Additional anomaly information. e.g. Google + Cloud Storage uri. + summary (str): + Overview of this anomaly. + anomaly (google.protobuf.struct_pb2.Value): + Anomaly body. + trigger_time (google.protobuf.timestamp_pb2.Timestamp): + The time the anomaly was triggered. 
+ condition (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertCondition): + The alert condition associated with this + anomaly. + """ + + anomaly_uri: str = proto.Field( + proto.STRING, + number=1, + ) + summary: str = proto.Field( + proto.STRING, + number=2, + ) + anomaly: struct_pb2.Value = proto.Field( + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + trigger_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + condition: "ModelMonitoringAlertCondition" = proto.Field( + proto.MESSAGE, + number=5, + message="ModelMonitoringAlertCondition", + ) + + tabular_anomaly: TabularAnomaly = proto.Field( + proto.MESSAGE, + number=1, + oneof="anomaly", + message=TabularAnomaly, + ) + model_monitoring_job: str = proto.Field( + proto.STRING, + number=2, + ) + algorithm: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ModelMonitoringAlert(proto.Message): + r"""Represents a single monitoring alert. This is currently used + in the SearchModelMonitoringAlerts api, thus the alert wrapped + in this message belongs to the resource asked in the request. + + Attributes: + stats_name (str): + The stats name. + objective_type (str): + One of the supported monitoring objectives: + ``raw-feature-drift`` ``prediction-output-drift`` + ``feature-attribution`` + alert_time (google.protobuf.timestamp_pb2.Timestamp): + Alert creation time. + anomaly (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAnomaly): + Anomaly details. 
+ """ + + stats_name: str = proto.Field( + proto.STRING, + number=1, + ) + objective_type: str = proto.Field( + proto.STRING, + number=2, + ) + alert_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + anomaly: "ModelMonitoringAnomaly" = proto.Field( + proto.MESSAGE, + number=4, + message="ModelMonitoringAnomaly", + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/model_monitoring_job.py b/google/cloud/aiplatform_v1beta1/types/model_monitoring_job.py new file mode 100644 index 0000000000..728c397393 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/model_monitoring_job.py @@ -0,0 +1,193 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import job_state +from google.cloud.aiplatform_v1beta1.types import ( + model_monitoring_spec as gca_model_monitoring_spec, +) +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import interval_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "ModelMonitoringJob", + "ModelMonitoringJobExecutionDetail", + }, +) + + +class ModelMonitoringJob(proto.Message): + r"""Represents a model monitoring job that analyze dataset using + different monitoring algorithm. + + Attributes: + name (str): + Output only. Resource name of a ModelMonitoringJob. Format: + ``projects/{project_id}/locations/{location_id}/modelMonitors/{model_monitor_id}/modelMonitoringJobs/{model_monitoring_job_id}`` + display_name (str): + The display name of the ModelMonitoringJob. + The name can be up to 128 characters long and + can consist of any UTF-8. + model_monitoring_spec (google.cloud.aiplatform_v1beta1.types.ModelMonitoringSpec): + Monitoring monitoring job spec. It outlines + the specifications for monitoring objectives, + notifications, and result exports. If left + blank, the default monitoring specifications + from the top-level resource 'ModelMonitor' will + be applied. If provided, we will use the + specification defined here rather than the + default one. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + ModelMonitoringJob was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + ModelMonitoringJob was updated most recently. + state (google.cloud.aiplatform_v1beta1.types.JobState): + Output only. The state of the monitoring job. 
+ + - When the job is still creating, the state will be + 'JOB_STATE_PENDING'. + - Once the job is successfully created, the state will be + 'JOB_STATE_RUNNING'. + - Once the job is finished, the state will be one of + 'JOB_STATE_FAILED', 'JOB_STATE_SUCCEEDED', + 'JOB_STATE_PARTIALLY_SUCCEEDED'. + schedule (str): + Output only. Schedule resource name. It will + only appear when this job is triggered by a + schedule. + job_execution_detail (google.cloud.aiplatform_v1beta1.types.ModelMonitoringJobExecutionDetail): + Output only. Execution results for all the + monitoring objectives. + schedule_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + ModelMonitoringJob was scheduled. It will only + appear when this job is triggered by a schedule. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + model_monitoring_spec: gca_model_monitoring_spec.ModelMonitoringSpec = proto.Field( + proto.MESSAGE, + number=3, + message=gca_model_monitoring_spec.ModelMonitoringSpec, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + state: job_state.JobState = proto.Field( + proto.ENUM, + number=6, + enum=job_state.JobState, + ) + schedule: str = proto.Field( + proto.STRING, + number=7, + ) + job_execution_detail: "ModelMonitoringJobExecutionDetail" = proto.Field( + proto.MESSAGE, + number=8, + message="ModelMonitoringJobExecutionDetail", + ) + schedule_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=9, + message=timestamp_pb2.Timestamp, + ) + + +class ModelMonitoringJobExecutionDetail(proto.Message): + r"""Represent the execution details of the job. 
+ + Attributes: + baseline_datasets (MutableSequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringJobExecutionDetail.ProcessedDataset]): + Processed baseline datasets. + target_datasets (MutableSequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringJobExecutionDetail.ProcessedDataset]): + Processed target datasets. + objective_status (MutableMapping[str, google.rpc.status_pb2.Status]): + Status of data processing for each monitoring + objective. Key is the objective. + error (google.rpc.status_pb2.Status): + Additional job error status. + """ + + class ProcessedDataset(proto.Message): + r"""Processed dataset information. + + Attributes: + location (str): + Actual data location of the processed + dataset. + time_range (google.type.interval_pb2.Interval): + Dataset time range information if any. + """ + + location: str = proto.Field( + proto.STRING, + number=1, + ) + time_range: interval_pb2.Interval = proto.Field( + proto.MESSAGE, + number=2, + message=interval_pb2.Interval, + ) + + baseline_datasets: MutableSequence[ProcessedDataset] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=ProcessedDataset, + ) + target_datasets: MutableSequence[ProcessedDataset] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=ProcessedDataset, + ) + objective_status: MutableMapping[str, status_pb2.Status] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=3, + message=status_pb2.Status, + ) + error: status_pb2.Status = proto.Field( + proto.MESSAGE, + number=4, + message=status_pb2.Status, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/model_monitoring_service.py b/google/cloud/aiplatform_v1beta1/types/model_monitoring_service.py new file mode 100644 index 0000000000..c4a04b3d1d --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/model_monitoring_service.py @@ -0,0 +1,573 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import model_monitor as gca_model_monitor +from google.cloud.aiplatform_v1beta1.types import model_monitoring_alert +from google.cloud.aiplatform_v1beta1.types import ( + model_monitoring_job as gca_model_monitoring_job, +) +from google.cloud.aiplatform_v1beta1.types import model_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore +from google.type import interval_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "CreateModelMonitorRequest", + "CreateModelMonitorOperationMetadata", + "UpdateModelMonitorRequest", + "UpdateModelMonitorOperationMetadata", + "GetModelMonitorRequest", + "ListModelMonitorsRequest", + "ListModelMonitorsResponse", + "DeleteModelMonitorRequest", + "CreateModelMonitoringJobRequest", + "GetModelMonitoringJobRequest", + "ListModelMonitoringJobsRequest", + "ListModelMonitoringJobsResponse", + "DeleteModelMonitoringJobRequest", + "SearchModelMonitoringStatsRequest", + "SearchModelMonitoringStatsResponse", + "SearchModelMonitoringAlertsRequest", + "SearchModelMonitoringAlertsResponse", + }, +) + + +class CreateModelMonitorRequest(proto.Message): + r"""Request message for + 
[ModelMonitoringService.CreateModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.CreateModelMonitor]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + ModelMonitor in. Format: + ``projects/{project}/locations/{location}`` + model_monitor (google.cloud.aiplatform_v1beta1.types.ModelMonitor): + Required. The ModelMonitor to create. + model_monitor_id (str): + Optional. The ID to use for the Model Monitor, which will + become the final component of the model monitor resource + name. + + The maximum length is 63 characters, and valid characters + are ``/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/``. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + model_monitor: gca_model_monitor.ModelMonitor = proto.Field( + proto.MESSAGE, + number=2, + message=gca_model_monitor.ModelMonitor, + ) + model_monitor_id: str = proto.Field( + proto.STRING, + number=3, + ) + + +class CreateModelMonitorOperationMetadata(proto.Message): + r"""Runtime operation information for + [ModelMonitoringService.CreateModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.CreateModelMonitor]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class UpdateModelMonitorRequest(proto.Message): + r"""Request message for + [ModelMonitoringService.UpdateModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.UpdateModelMonitor]. + + Attributes: + model_monitor (google.cloud.aiplatform_v1beta1.types.ModelMonitor): + Required. The model monitoring configuration + which replaces the resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Mask specifying which fields to + update. 
+ """ + + model_monitor: gca_model_monitor.ModelMonitor = proto.Field( + proto.MESSAGE, + number=1, + message=gca_model_monitor.ModelMonitor, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class UpdateModelMonitorOperationMetadata(proto.Message): + r"""Runtime operation information for + [ModelMonitoringService.UpdateModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.UpdateModelMonitor]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetModelMonitorRequest(proto.Message): + r"""Request message for + [ModelMonitoringService.GetModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.GetModelMonitor]. + + Attributes: + name (str): + Required. The name of the ModelMonitor resource. Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelMonitorsRequest(proto.Message): + r"""Request message for + [ModelMonitoringService.ListModelMonitors][google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitors]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + ModelMonitors from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. More detail in + `AIP-160 `__. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + filter: str = proto.Field( + proto.STRING, + number=2, + ) + page_size: int = proto.Field( + proto.INT32, + number=3, + ) + page_token: str = proto.Field( + proto.STRING, + number=4, + ) + read_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListModelMonitorsResponse(proto.Message): + r"""Response message for + [ModelMonitoringService.ListModelMonitors][google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitors] + + Attributes: + model_monitors (MutableSequence[google.cloud.aiplatform_v1beta1.types.ModelMonitor]): + List of ModelMonitor in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListModelMonitorsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelMonitorsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + model_monitors: MutableSequence[ + gca_model_monitor.ModelMonitor + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model_monitor.ModelMonitor, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteModelMonitorRequest(proto.Message): + r"""Request message for + [ModelMonitoringService.DeleteModelMonitor][google.cloud.aiplatform.v1beta1.ModelMonitoringService.DeleteModelMonitor]. + + Attributes: + name (str): + Required. The name of the ModelMonitor resource to be + deleted. Format: + ``projects/{project}/locations/{location}/modelMonitords/{model_monitor}`` + force (bool): + Optional. Force delete the model monitor with + schedules. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + force: bool = proto.Field( + proto.BOOL, + number=2, + ) + + +class CreateModelMonitoringJobRequest(proto.Message): + r"""Request message for + [ModelMonitoringService.CreateModelMonitoringJob][google.cloud.aiplatform.v1beta1.ModelMonitoringService.CreateModelMonitoringJob]. + + Attributes: + parent (str): + Required. The parent of the ModelMonitoringJob. Format: + ``projects/{project}/locations/{location}/modelMoniitors/{model_monitor}`` + model_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelMonitoringJob): + Required. The ModelMonitoringJob to create + model_monitoring_job_id (str): + Optional. The ID to use for the Model Monitoring Job, which + will become the final component of the model monitoring job + resource name. + + The maximum length is 63 characters, and valid characters + are ``/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/``. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + model_monitoring_job: gca_model_monitoring_job.ModelMonitoringJob = proto.Field( + proto.MESSAGE, + number=2, + message=gca_model_monitoring_job.ModelMonitoringJob, + ) + model_monitoring_job_id: str = proto.Field( + proto.STRING, + number=3, + ) + + +class GetModelMonitoringJobRequest(proto.Message): + r"""Request message for + [ModelMonitoringService.GetModelMonitoringJob][google.cloud.aiplatform.v1beta1.ModelMonitoringService.GetModelMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the ModelMonitoringJob. + Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}/modelMonitoringJobs/{model_monitoring_job}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelMonitoringJobsRequest(proto.Message): + r"""Request message for + [ModelMonitoringService.ListModelMonitoringJobs][google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitoringJobs]. + + Attributes: + parent (str): + Required. 
The parent of the ModelMonitoringJob. Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}`` + filter (str): + The standard list filter. More detail in + `AIP-160 `__. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + filter: str = proto.Field( + proto.STRING, + number=2, + ) + page_size: int = proto.Field( + proto.INT32, + number=3, + ) + page_token: str = proto.Field( + proto.STRING, + number=4, + ) + read_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListModelMonitoringJobsResponse(proto.Message): + r"""Response message for + [ModelMonitoringService.ListModelMonitoringJobs][google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitoringJobs]. + + Attributes: + model_monitoring_jobs (MutableSequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringJob]): + A list of ModelMonitoringJobs that matches + the specified filter in the request. + next_page_token (str): + The standard List next-page token. + """ + + @property + def raw_page(self): + return self + + model_monitoring_jobs: MutableSequence[ + gca_model_monitoring_job.ModelMonitoringJob + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model_monitoring_job.ModelMonitoringJob, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteModelMonitoringJobRequest(proto.Message): + r"""Request message for + [ModelMonitoringService.DeleteModelMonitoringJob][google.cloud.aiplatform.v1beta1.ModelMonitoringService.DeleteModelMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the model monitoring job to + delete. 
Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}/modelMonitoringJobs/{model_monitoring_job}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class SearchModelMonitoringStatsRequest(proto.Message): + r"""Request message for + [ModelMonitoringService.SearchModelMonitoringStats][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringStats]. + + Attributes: + model_monitor (str): + Required. ModelMonitor resource name. Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}`` + stats_filter (google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringStatsFilter): + Filter for search different stats. + time_interval (google.type.interval_pb2.Interval): + The time interval for which results should be + returned. + page_size (int): + The standard list page size. + page_token (str): + A page token received from a previous + [ModelMonitoringService.SearchModelMonitoringStats][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringStats] + call. + """ + + model_monitor: str = proto.Field( + proto.STRING, + number=1, + ) + stats_filter: model_monitoring_stats.SearchModelMonitoringStatsFilter = proto.Field( + proto.MESSAGE, + number=2, + message=model_monitoring_stats.SearchModelMonitoringStatsFilter, + ) + time_interval: interval_pb2.Interval = proto.Field( + proto.MESSAGE, + number=3, + message=interval_pb2.Interval, + ) + page_size: int = proto.Field( + proto.INT32, + number=4, + ) + page_token: str = proto.Field( + proto.STRING, + number=5, + ) + + +class SearchModelMonitoringStatsResponse(proto.Message): + r"""Response message for + [ModelMonitoringService.SearchModelMonitoringStats][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringStats]. + + Attributes: + monitoring_stats (MutableSequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringStats]): + Stats retrieved for requested objectives. 
+ next_page_token (str): + The page token that can be used by the next + [ModelMonitoringService.SearchModelMonitoringStats][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringStats] + call. + """ + + @property + def raw_page(self): + return self + + monitoring_stats: MutableSequence[ + model_monitoring_stats.ModelMonitoringStats + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=model_monitoring_stats.ModelMonitoringStats, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class SearchModelMonitoringAlertsRequest(proto.Message): + r"""Request message for + [ModelMonitoringService.SearchModelMonitoringAlerts][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringAlerts]. + + Attributes: + model_monitor (str): + Required. ModelMonitor resource name. Format: + ``projects/{project}/locations/{location}/modelMonitors/{model_monitor}`` + model_monitoring_job (str): + If non-empty, returns the alerts of this + model monitoring job. + alert_time_interval (google.type.interval_pb2.Interval): + If non-empty, returns the alerts in this time + interval. + stats_name (str): + If non-empty, returns the alerts of this stats_name. + objective_type (str): + If non-empty, returns the alerts of this objective type. + Supported monitoring objectives: ``raw-feature-drift`` + ``prediction-output-drift`` ``feature-attribution`` + page_size (int): + The standard list page size. + page_token (str): + A page token received from a previous + [ModelMonitoringService.SearchModelMonitoringAlerts][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringAlerts] + call. 
+ """ + + model_monitor: str = proto.Field( + proto.STRING, + number=1, + ) + model_monitoring_job: str = proto.Field( + proto.STRING, + number=2, + ) + alert_time_interval: interval_pb2.Interval = proto.Field( + proto.MESSAGE, + number=3, + message=interval_pb2.Interval, + ) + stats_name: str = proto.Field( + proto.STRING, + number=4, + ) + objective_type: str = proto.Field( + proto.STRING, + number=5, + ) + page_size: int = proto.Field( + proto.INT32, + number=6, + ) + page_token: str = proto.Field( + proto.STRING, + number=7, + ) + + +class SearchModelMonitoringAlertsResponse(proto.Message): + r"""Response message for + [ModelMonitoringService.SearchModelMonitoringAlerts][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringAlerts]. + + Attributes: + model_monitoring_alerts (MutableSequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlert]): + Alerts retrieved for the requested + objectives. Sorted by alert time descendingly. + total_number_alerts (int): + The total number of alerts retrieved by the + requested objectives. + next_page_token (str): + The page token that can be used by the next + [ModelMonitoringService.SearchModelMonitoringAlerts][google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringAlerts] + call. 
+ """ + + @property + def raw_page(self): + return self + + model_monitoring_alerts: MutableSequence[ + model_monitoring_alert.ModelMonitoringAlert + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=model_monitoring_alert.ModelMonitoringAlert, + ) + total_number_alerts: int = proto.Field( + proto.INT64, + number=2, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/model_monitoring_spec.py b/google/cloud/aiplatform_v1beta1/types/model_monitoring_spec.py new file mode 100644 index 0000000000..ab52c8c627 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/model_monitoring_spec.py @@ -0,0 +1,600 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import model_monitoring_alert +from google.type import interval_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "ModelMonitoringSpec", + "ModelMonitoringObjectiveSpec", + "ModelMonitoringOutputSpec", + "ModelMonitoringInput", + "ModelMonitoringNotificationSpec", + }, +) + + +class ModelMonitoringSpec(proto.Message): + r"""Monitoring monitoring job spec. It outlines the + specifications for monitoring objectives, notifications, and + result exports. + + Attributes: + objective_spec (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveSpec): + The monitoring objective spec. + notification_spec (google.cloud.aiplatform_v1beta1.types.ModelMonitoringNotificationSpec): + The model monitoring notification spec. + output_spec (google.cloud.aiplatform_v1beta1.types.ModelMonitoringOutputSpec): + The Output destination spec for metrics, + error logs, etc. + """ + + objective_spec: "ModelMonitoringObjectiveSpec" = proto.Field( + proto.MESSAGE, + number=1, + message="ModelMonitoringObjectiveSpec", + ) + notification_spec: "ModelMonitoringNotificationSpec" = proto.Field( + proto.MESSAGE, + number=2, + message="ModelMonitoringNotificationSpec", + ) + output_spec: "ModelMonitoringOutputSpec" = proto.Field( + proto.MESSAGE, + number=3, + message="ModelMonitoringOutputSpec", + ) + + +class ModelMonitoringObjectiveSpec(proto.Message): + r"""Monitoring objectives spec. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + tabular_objective (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveSpec.TabularObjective): + Tabular monitoring objective. + + This field is a member of `oneof`_ ``objective``. + explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): + The explanation spec. + This spec is required when the objectives spec + includes feature attribution objectives. + baseline_dataset (google.cloud.aiplatform_v1beta1.types.ModelMonitoringInput): + Baseline dataset. + It could be the training dataset or production + serving dataset from a previous period. + target_dataset (google.cloud.aiplatform_v1beta1.types.ModelMonitoringInput): + Target dataset. + """ + + class DataDriftSpec(proto.Message): + r"""Data drift monitoring spec. + Data drift measures the distribution distance between the + current dataset and a baseline dataset. A typical use case is to + detect data drift between the recent production serving dataset + and the training dataset, or to compare the recent production + dataset with a dataset from a previous period. + + Attributes: + features (MutableSequence[str]): + Feature names / Prediction output names + interested in monitoring. These should be a + subset of the input feature names or prediction + output names specified in the monitoring schema. + If the field is not specified all features / + prediction outputs outlied in the monitoring + schema will be used. + categorical_metric_type (str): + Supported metrics type: + + - l_infinity + - jensen_shannon_divergence + numeric_metric_type (str): + Supported metrics type: + + - jensen_shannon_divergence + default_categorical_alert_condition (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertCondition): + Default alert condition for all the + categorical features. 
+ default_numeric_alert_condition (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertCondition): + Default alert condition for all the numeric + features. + feature_alert_conditions (MutableMapping[str, google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertCondition]): + Per feature alert condition will override + default alert condition. + """ + + features: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=1, + ) + categorical_metric_type: str = proto.Field( + proto.STRING, + number=2, + ) + numeric_metric_type: str = proto.Field( + proto.STRING, + number=3, + ) + default_categorical_alert_condition: model_monitoring_alert.ModelMonitoringAlertCondition = proto.Field( + proto.MESSAGE, + number=4, + message=model_monitoring_alert.ModelMonitoringAlertCondition, + ) + default_numeric_alert_condition: model_monitoring_alert.ModelMonitoringAlertCondition = proto.Field( + proto.MESSAGE, + number=5, + message=model_monitoring_alert.ModelMonitoringAlertCondition, + ) + feature_alert_conditions: MutableMapping[ + str, model_monitoring_alert.ModelMonitoringAlertCondition + ] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=6, + message=model_monitoring_alert.ModelMonitoringAlertCondition, + ) + + class FeatureAttributionSpec(proto.Message): + r"""Feature attribution monitoring spec. + + Attributes: + features (MutableSequence[str]): + Feature names interested in monitoring. + These should be a subset of the input feature + names specified in the monitoring schema. If the + field is not specified all features outlied in + the monitoring schema will be used. + default_alert_condition (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertCondition): + Default alert condition for all the features. + feature_alert_conditions (MutableMapping[str, google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertCondition]): + Per feature alert condition will override + default alert condition. 
+ batch_explanation_dedicated_resources (google.cloud.aiplatform_v1beta1.types.BatchDedicatedResources): + The config of resources used by the Model Monitoring during + the batch explanation for non-AutoML models. If not set, + ``n1-standard-2`` machine type will be used by default. + """ + + features: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=1, + ) + default_alert_condition: model_monitoring_alert.ModelMonitoringAlertCondition = proto.Field( + proto.MESSAGE, + number=2, + message=model_monitoring_alert.ModelMonitoringAlertCondition, + ) + feature_alert_conditions: MutableMapping[ + str, model_monitoring_alert.ModelMonitoringAlertCondition + ] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=3, + message=model_monitoring_alert.ModelMonitoringAlertCondition, + ) + batch_explanation_dedicated_resources: machine_resources.BatchDedicatedResources = proto.Field( + proto.MESSAGE, + number=4, + message=machine_resources.BatchDedicatedResources, + ) + + class TabularObjective(proto.Message): + r"""Tabular monitoring objective. + + Attributes: + feature_drift_spec (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveSpec.DataDriftSpec): + Input feature distribution drift monitoring + spec. + prediction_output_drift_spec (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveSpec.DataDriftSpec): + Prediction output distribution drift + monitoring spec. + feature_attribution_spec (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveSpec.FeatureAttributionSpec): + Feature attribution monitoring spec. 
+ """ + + feature_drift_spec: "ModelMonitoringObjectiveSpec.DataDriftSpec" = proto.Field( + proto.MESSAGE, + number=10, + message="ModelMonitoringObjectiveSpec.DataDriftSpec", + ) + prediction_output_drift_spec: "ModelMonitoringObjectiveSpec.DataDriftSpec" = ( + proto.Field( + proto.MESSAGE, + number=11, + message="ModelMonitoringObjectiveSpec.DataDriftSpec", + ) + ) + feature_attribution_spec: "ModelMonitoringObjectiveSpec.FeatureAttributionSpec" = proto.Field( + proto.MESSAGE, + number=12, + message="ModelMonitoringObjectiveSpec.FeatureAttributionSpec", + ) + + tabular_objective: TabularObjective = proto.Field( + proto.MESSAGE, + number=1, + oneof="objective", + message=TabularObjective, + ) + explanation_spec: explanation.ExplanationSpec = proto.Field( + proto.MESSAGE, + number=3, + message=explanation.ExplanationSpec, + ) + baseline_dataset: "ModelMonitoringInput" = proto.Field( + proto.MESSAGE, + number=4, + message="ModelMonitoringInput", + ) + target_dataset: "ModelMonitoringInput" = proto.Field( + proto.MESSAGE, + number=5, + message="ModelMonitoringInput", + ) + + +class ModelMonitoringOutputSpec(proto.Message): + r"""Specification for the export destination of monitoring + results, including metrics, logs, etc. + + Attributes: + gcs_base_directory (google.cloud.aiplatform_v1beta1.types.GcsDestination): + Google Cloud Storage base folder path for + metrics, error logs, etc. + """ + + gcs_base_directory: io.GcsDestination = proto.Field( + proto.MESSAGE, + number=1, + message=io.GcsDestination, + ) + + +class ModelMonitoringInput(proto.Message): + r"""Model monitoring data input spec. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + columnized_dataset (google.cloud.aiplatform_v1beta1.types.ModelMonitoringInput.ModelMonitoringDataset): + Columnized dataset. + + This field is a member of `oneof`_ ``dataset``. + batch_prediction_output (google.cloud.aiplatform_v1beta1.types.ModelMonitoringInput.BatchPredictionOutput): + Vertex AI Batch prediction Job. + + This field is a member of `oneof`_ ``dataset``. + vertex_endpoint_logs (google.cloud.aiplatform_v1beta1.types.ModelMonitoringInput.VertexEndpointLogs): + Vertex AI Endpoint request & response + logging. + + This field is a member of `oneof`_ ``dataset``. + time_interval (google.type.interval_pb2.Interval): + The time interval (pair of start_time and end_time) for + which results should be returned. + + This field is a member of `oneof`_ ``time_spec``. + time_offset (google.cloud.aiplatform_v1beta1.types.ModelMonitoringInput.TimeOffset): + The time offset setting for which results + should be returned. + + This field is a member of `oneof`_ ``time_spec``. + """ + + class ModelMonitoringDataset(proto.Message): + r"""Input dataset spec. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + vertex_dataset (str): + Resource name of the Vertex AI managed + dataset. + + This field is a member of `oneof`_ ``data_location``. + gcs_source (google.cloud.aiplatform_v1beta1.types.ModelMonitoringInput.ModelMonitoringDataset.ModelMonitoringGcsSource): + Google Cloud Storage data source. + + This field is a member of `oneof`_ ``data_location``. 
+ bigquery_source (google.cloud.aiplatform_v1beta1.types.ModelMonitoringInput.ModelMonitoringDataset.ModelMonitoringBigQuerySource): + BigQuery data source. + + This field is a member of `oneof`_ ``data_location``. + timestamp_field (str): + The timestamp field. Usually for serving + data. + """ + + class ModelMonitoringGcsSource(proto.Message): + r"""Dataset spec for data stored in Google Cloud Storage. + + Attributes: + gcs_uri (str): + Google Cloud Storage URI to the input + file(s). May contain wildcards. For more + information on wildcards, see + https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. + format_ (google.cloud.aiplatform_v1beta1.types.ModelMonitoringInput.ModelMonitoringDataset.ModelMonitoringGcsSource.DataFormat): + Data format of the dataset. + """ + + class DataFormat(proto.Enum): + r"""Supported data format. + + Values: + DATA_FORMAT_UNSPECIFIED (0): + Data format unspecified, used when this field + is unset. + CSV (1): + CSV files. + TF_RECORD (2): + TfRecord files + JSONL (3): + JsonL files. + """ + DATA_FORMAT_UNSPECIFIED = 0 + CSV = 1 + TF_RECORD = 2 + JSONL = 3 + + gcs_uri: str = proto.Field( + proto.STRING, + number=1, + ) + format_: "ModelMonitoringInput.ModelMonitoringDataset.ModelMonitoringGcsSource.DataFormat" = proto.Field( + proto.ENUM, + number=2, + enum="ModelMonitoringInput.ModelMonitoringDataset.ModelMonitoringGcsSource.DataFormat", + ) + + class ModelMonitoringBigQuerySource(proto.Message): + r"""Dataset spec for data sotred in BigQuery. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + table_uri (str): + BigQuery URI to a table, up to 2000 characters long. All the + columns in the table will be selected. 
Accepted forms: + + - BigQuery path. For example: + ``bq://projectId.bqDatasetId.bqTableId``. + + This field is a member of `oneof`_ ``connection``. + query (str): + Standard SQL to be used instead of the ``table_uri``. + + This field is a member of `oneof`_ ``connection``. + """ + + table_uri: str = proto.Field( + proto.STRING, + number=1, + oneof="connection", + ) + query: str = proto.Field( + proto.STRING, + number=2, + oneof="connection", + ) + + vertex_dataset: str = proto.Field( + proto.STRING, + number=1, + oneof="data_location", + ) + gcs_source: "ModelMonitoringInput.ModelMonitoringDataset.ModelMonitoringGcsSource" = proto.Field( + proto.MESSAGE, + number=2, + oneof="data_location", + message="ModelMonitoringInput.ModelMonitoringDataset.ModelMonitoringGcsSource", + ) + bigquery_source: "ModelMonitoringInput.ModelMonitoringDataset.ModelMonitoringBigQuerySource" = proto.Field( + proto.MESSAGE, + number=6, + oneof="data_location", + message="ModelMonitoringInput.ModelMonitoringDataset.ModelMonitoringBigQuerySource", + ) + timestamp_field: str = proto.Field( + proto.STRING, + number=7, + ) + + class BatchPredictionOutput(proto.Message): + r"""Data from Vertex AI Batch prediction job output. + + Attributes: + batch_prediction_job (str): + Vertex AI Batch prediction job resource name. The job must + match the model version specified in + [ModelMonitor].[model_monitoring_target]. + """ + + batch_prediction_job: str = proto.Field( + proto.STRING, + number=1, + ) + + class VertexEndpointLogs(proto.Message): + r"""Data from Vertex AI Endpoint request response logging. + + Attributes: + endpoints (MutableSequence[str]): + List of endpoint resource names. The endpoints must enable + the logging with the + [Endpoint].[request_response_logging_config], and must + contain the deployed model corresponding to the model + version specified in + [ModelMonitor].[model_monitoring_target]. 
+ """ + + endpoints: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=1, + ) + + class TimeOffset(proto.Message): + r"""Time offset setting. + + Attributes: + offset (str): + [offset] is the time difference from the cut-off time. For + scheduled jobs, the cut-off time is the scheduled time. For + non-scheduled jobs, it's the time when the job was created. + Currently we support the following format: 'w|W': Week, + 'd|D': Day, 'h|H': Hour E.g. '1h' stands for 1 hour, '2d' + stands for 2 days. + window (str): + [window] refers to the scope of data selected for analysis. + It allows you to specify the quantity of data you wish to + examine. Currently we support the following format: 'w|W': + Week, 'd|D': Day, 'h|H': Hour E.g. '1h' stands for 1 hour, + '2d' stands for 2 days. + """ + + offset: str = proto.Field( + proto.STRING, + number=1, + ) + window: str = proto.Field( + proto.STRING, + number=2, + ) + + columnized_dataset: ModelMonitoringDataset = proto.Field( + proto.MESSAGE, + number=1, + oneof="dataset", + message=ModelMonitoringDataset, + ) + batch_prediction_output: BatchPredictionOutput = proto.Field( + proto.MESSAGE, + number=2, + oneof="dataset", + message=BatchPredictionOutput, + ) + vertex_endpoint_logs: VertexEndpointLogs = proto.Field( + proto.MESSAGE, + number=3, + oneof="dataset", + message=VertexEndpointLogs, + ) + time_interval: interval_pb2.Interval = proto.Field( + proto.MESSAGE, + number=6, + oneof="time_spec", + message=interval_pb2.Interval, + ) + time_offset: TimeOffset = proto.Field( + proto.MESSAGE, + number=7, + oneof="time_spec", + message=TimeOffset, + ) + + +class ModelMonitoringNotificationSpec(proto.Message): + r"""Notification spec(email, notification channel) for model + monitoring statistics/alerts. + + Attributes: + email_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringNotificationSpec.EmailConfig): + Email alert config. + enable_cloud_logging (bool): + Dump the anomalies to Cloud Logging. 
The anomalies will be + put to json payload encoded from proto + [google.cloud.aiplatform.logging.ModelMonitoringAnomaliesLogEntry][]. + This can be further sinked to Pub/Sub or any other services + supported by Cloud Logging. + notification_channel_configs (MutableSequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringNotificationSpec.NotificationChannelConfig]): + Notification channel config. + """ + + class EmailConfig(proto.Message): + r"""The config for email alerts. + + Attributes: + user_emails (MutableSequence[str]): + The email addresses to send the alerts. + """ + + user_emails: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=1, + ) + + class NotificationChannelConfig(proto.Message): + r"""Google Cloud Notification Channel config. + + Attributes: + notification_channel (str): + Resource names of the NotificationChannels. Must be of the + format + ``projects//notificationChannels/`` + """ + + notification_channel: str = proto.Field( + proto.STRING, + number=1, + ) + + email_config: EmailConfig = proto.Field( + proto.MESSAGE, + number=1, + message=EmailConfig, + ) + enable_cloud_logging: bool = proto.Field( + proto.BOOL, + number=2, + ) + notification_channel_configs: MutableSequence[ + NotificationChannelConfig + ] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=NotificationChannelConfig, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/model_monitoring_stats.py b/google/cloud/aiplatform_v1beta1/types/model_monitoring_stats.py new file mode 100644 index 0000000000..8ca71fb488 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/model_monitoring_stats.py @@ -0,0 +1,270 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "ModelMonitoringStats", + "ModelMonitoringStatsDataPoint", + "ModelMonitoringTabularStats", + "SearchModelMonitoringStatsFilter", + }, +) + + +class ModelMonitoringStats(proto.Message): + r"""Represents the collection of statistics for a metric. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + tabular_stats (google.cloud.aiplatform_v1beta1.types.ModelMonitoringTabularStats): + Generated tabular statistics. + + This field is a member of `oneof`_ ``stats``. + """ + + tabular_stats: "ModelMonitoringTabularStats" = proto.Field( + proto.MESSAGE, + number=1, + oneof="stats", + message="ModelMonitoringTabularStats", + ) + + +class ModelMonitoringStatsDataPoint(proto.Message): + r"""Represents a single statistics data point. + + Attributes: + current_stats (google.cloud.aiplatform_v1beta1.types.ModelMonitoringStatsDataPoint.TypedValue): + Statistics from current dataset. + baseline_stats (google.cloud.aiplatform_v1beta1.types.ModelMonitoringStatsDataPoint.TypedValue): + Statistics from baseline dataset. + threshold_value (float): + Threshold value. + has_anomaly (bool): + Indicate if the statistics has anomaly. 
+ model_monitoring_job (str): + Model monitoring job resource name. + schedule (str): + Schedule resource name. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Statistics create time. + algorithm (str): + Algorithm used to calculated the metrics, eg: + jensen_shannon_divergence, l_infinity. + """ + + class TypedValue(proto.Message): + r"""Typed value of the statistics. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + double_value (float): + Double. + + This field is a member of `oneof`_ ``value``. + distribution_value (google.cloud.aiplatform_v1beta1.types.ModelMonitoringStatsDataPoint.TypedValue.DistributionDataValue): + Distribution. + + This field is a member of `oneof`_ ``value``. + """ + + class DistributionDataValue(proto.Message): + r"""Summary statistics for a population of values. + + Attributes: + distribution (google.protobuf.struct_pb2.Value): + tensorflow.metadata.v0.DatasetFeatureStatistics + format. + distribution_deviation (float): + Distribution distance deviation from the current dataset's + statistics to baseline dataset's statistics. + + - For categorical feature, the distribution distance is + calculated by L-inifinity norm or Jensen–Shannon + divergence. + - For numerical feature, the distribution distance is + calculated by Jensen–Shannon divergence. 
+ """ + + distribution: struct_pb2.Value = proto.Field( + proto.MESSAGE, + number=1, + message=struct_pb2.Value, + ) + distribution_deviation: float = proto.Field( + proto.DOUBLE, + number=2, + ) + + double_value: float = proto.Field( + proto.DOUBLE, + number=1, + oneof="value", + ) + distribution_value: "ModelMonitoringStatsDataPoint.TypedValue.DistributionDataValue" = proto.Field( + proto.MESSAGE, + number=2, + oneof="value", + message="ModelMonitoringStatsDataPoint.TypedValue.DistributionDataValue", + ) + + current_stats: TypedValue = proto.Field( + proto.MESSAGE, + number=1, + message=TypedValue, + ) + baseline_stats: TypedValue = proto.Field( + proto.MESSAGE, + number=2, + message=TypedValue, + ) + threshold_value: float = proto.Field( + proto.DOUBLE, + number=3, + ) + has_anomaly: bool = proto.Field( + proto.BOOL, + number=4, + ) + model_monitoring_job: str = proto.Field( + proto.STRING, + number=5, + ) + schedule: str = proto.Field( + proto.STRING, + number=6, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + algorithm: str = proto.Field( + proto.STRING, + number=8, + ) + + +class ModelMonitoringTabularStats(proto.Message): + r"""A collection of data points that describes the time-varying + values of a tabular metric. + + Attributes: + stats_name (str): + The stats name. + objective_type (str): + One of the supported monitoring objectives: + ``raw-feature-drift`` ``prediction-output-drift`` + ``feature-attribution`` + data_points (MutableSequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringStatsDataPoint]): + The data points of this time series. When + listing time series, points are returned in + reverse time order. 
+ """ + + stats_name: str = proto.Field( + proto.STRING, + number=1, + ) + objective_type: str = proto.Field( + proto.STRING, + number=2, + ) + data_points: MutableSequence["ModelMonitoringStatsDataPoint"] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message="ModelMonitoringStatsDataPoint", + ) + + +class SearchModelMonitoringStatsFilter(proto.Message): + r"""Filter for searching ModelMonitoringStats. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + tabular_stats_filter (google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringStatsFilter.TabularStatsFilter): + Tabular statistics filter. + + This field is a member of `oneof`_ ``filter``. + """ + + class TabularStatsFilter(proto.Message): + r"""Tabular statistics filter. + + Attributes: + stats_name (str): + If not specified, will return all the stats_names. + objective_type (str): + One of the supported monitoring objectives: + ``raw-feature-drift`` ``prediction-output-drift`` + ``feature-attribution`` + model_monitoring_job (str): + From a particular monitoring job. + model_monitoring_schedule (str): + From a particular monitoring schedule. + algorithm (str): + Specify the algorithm type used for distance calculation, + eg: jensen_shannon_divergence, l_infinity. 
+ """ + + stats_name: str = proto.Field( + proto.STRING, + number=1, + ) + objective_type: str = proto.Field( + proto.STRING, + number=2, + ) + model_monitoring_job: str = proto.Field( + proto.STRING, + number=3, + ) + model_monitoring_schedule: str = proto.Field( + proto.STRING, + number=4, + ) + algorithm: str = proto.Field( + proto.STRING, + number=5, + ) + + tabular_stats_filter: TabularStatsFilter = proto.Field( + proto.MESSAGE, + number=1, + oneof="filter", + message=TabularStatsFilter, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/schedule.py b/google/cloud/aiplatform_v1beta1/types/schedule.py index 79da857902..93a4c6a497 100644 --- a/google/cloud/aiplatform_v1beta1/types/schedule.py +++ b/google/cloud/aiplatform_v1beta1/types/schedule.py @@ -19,6 +19,7 @@ import proto # type: ignore +from google.cloud.aiplatform_v1beta1.types import model_monitoring_service from google.cloud.aiplatform_v1beta1.types import pipeline_service from google.protobuf import timestamp_pb2 # type: ignore @@ -36,6 +37,10 @@ class Schedule(proto.Message): API calls based on user specified time specification and API request type. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -56,6 +61,11 @@ class Schedule(proto.Message): CreatePipelineJobRequest.parent field is required (format: projects/{project}/locations/{location}). + This field is a member of `oneof`_ ``request``. + create_model_monitoring_job_request (google.cloud.aiplatform_v1beta1.types.CreateModelMonitoringJobRequest): + Request for + [ModelMonitoringService.CreateModelMonitoringJob][google.cloud.aiplatform.v1beta1.ModelMonitoringService.CreateModelMonitoringJob]. 
+ This field is a member of `oneof`_ ``request``. name (str): Immutable. The resource name of the Schedule. @@ -186,6 +196,12 @@ class RunResponse(proto.Message): message=pipeline_service.CreatePipelineJobRequest, ) ) + create_model_monitoring_job_request: model_monitoring_service.CreateModelMonitoringJobRequest = proto.Field( + proto.MESSAGE, + number=15, + oneof="request", + message=model_monitoring_service.CreateModelMonitoringJobRequest, + ) name: str = proto.Field( proto.STRING, number=1, diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitor_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitor_async.py new file mode 100644 index 0000000000..8d8a07ff65 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitor_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateModelMonitor +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_CreateModelMonitor_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_model_monitor(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateModelMonitorRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_model_monitor(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_CreateModelMonitor_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitor_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitor_sync.py new file mode 100644 index 0000000000..691d1c3f3b --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitor_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateModelMonitor +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_CreateModelMonitor_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_create_model_monitor(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateModelMonitorRequest( + parent="parent_value", + ) + + # Make the request + operation = client.create_model_monitor(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_CreateModelMonitor_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitoring_job_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitoring_job_async.py new file mode 100644 index 0000000000..47dfcb2ca9 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitoring_job_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateModelMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_CreateModelMonitoringJob_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_model_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateModelMonitoringJobRequest( + parent="parent_value", + ) + + # Make the request + response = await client.create_model_monitoring_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_CreateModelMonitoringJob_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitoring_job_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitoring_job_sync.py new file mode 100644 index 0000000000..494fc86ee6 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitoring_job_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateModelMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_CreateModelMonitoringJob_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_create_model_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.CreateModelMonitoringJobRequest( + parent="parent_value", + ) + + # Make the request + response = client.create_model_monitoring_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_CreateModelMonitoringJob_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitor_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitor_async.py new file mode 100644 index 0000000000..4b5a51d537 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitor_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModelMonitor +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_DeleteModelMonitor_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_model_monitor(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelMonitorRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_monitor(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_DeleteModelMonitor_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitor_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitor_sync.py new file mode 100644 index 0000000000..e4644bed73 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitor_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModelMonitor +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_DeleteModelMonitor_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_model_monitor(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelMonitorRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_monitor(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_DeleteModelMonitor_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitoring_job_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitoring_job_async.py new file mode 100644 index 0000000000..282679b0c5 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitoring_job_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModelMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_DeleteModelMonitoringJob_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_model_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelMonitoringJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_DeleteModelMonitoringJob_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitoring_job_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitoring_job_sync.py new file mode 100644 index 0000000000..37872be9d9 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitoring_job_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteModelMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_DeleteModelMonitoringJob_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_model_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteModelMonitoringJobRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_model_monitoring_job(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_DeleteModelMonitoringJob_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitor_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitor_async.py new file mode 100644 index 0000000000..61cf2b6e09 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitor_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelMonitor +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_GetModelMonitor_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_model_monitor(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelMonitorRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_monitor(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_GetModelMonitor_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitor_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitor_sync.py new file mode 100644 index 0000000000..4f1a18c5ee --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitor_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelMonitor +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_GetModelMonitor_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_get_model_monitor(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelMonitorRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_monitor(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_GetModelMonitor_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitoring_job_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitoring_job_async.py new file mode 100644 index 0000000000..b2261fdf4a --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitoring_job_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_GetModelMonitoringJob_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_model_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelMonitoringJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_model_monitoring_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_GetModelMonitoringJob_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitoring_job_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitoring_job_sync.py new file mode 100644 index 0000000000..8304c4fbeb --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitoring_job_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetModelMonitoringJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_GetModelMonitoringJob_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_get_model_monitoring_job(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetModelMonitoringJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_model_monitoring_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_GetModelMonitoringJob_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitoring_jobs_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitoring_jobs_async.py new file mode 100644 index 0000000000..27ad68537f --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitoring_jobs_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelMonitoringJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_ListModelMonitoringJobs_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_model_monitoring_jobs(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelMonitoringJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_monitoring_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_ListModelMonitoringJobs_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitoring_jobs_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitoring_jobs_sync.py new file mode 100644 index 0000000000..a8fa07b53c --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitoring_jobs_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelMonitoringJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_ListModelMonitoringJobs_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_list_model_monitoring_jobs(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelMonitoringJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_monitoring_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_ListModelMonitoringJobs_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitors_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitors_async.py new file mode 100644 index 0000000000..b5a70bb606 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitors_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelMonitors +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_ListModelMonitors_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_model_monitors(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelMonitorsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_monitors(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_ListModelMonitors_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitors_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitors_sync.py new file mode 100644 index 0000000000..a5b4a4a313 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitors_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListModelMonitors +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_ListModelMonitors_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_list_model_monitors(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListModelMonitorsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_model_monitors(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_ListModelMonitors_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_alerts_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_alerts_async.py new file mode 100644 index 0000000000..01fe488ce2 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_alerts_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchModelMonitoringAlerts +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_SearchModelMonitoringAlerts_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_search_model_monitoring_alerts(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchModelMonitoringAlertsRequest( + model_monitor="model_monitor_value", + ) + + # Make the request + page_result = client.search_model_monitoring_alerts(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_SearchModelMonitoringAlerts_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_alerts_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_alerts_sync.py new file mode 100644 index 0000000000..73e0e16133 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_alerts_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchModelMonitoringAlerts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_SearchModelMonitoringAlerts_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_search_model_monitoring_alerts(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchModelMonitoringAlertsRequest( + model_monitor="model_monitor_value", + ) + + # Make the request + page_result = client.search_model_monitoring_alerts(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_SearchModelMonitoringAlerts_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_stats_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_stats_async.py new file mode 100644 index 0000000000..072ed21c76 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_stats_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchModelMonitoringStats +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_SearchModelMonitoringStats_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_search_model_monitoring_stats(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchModelMonitoringStatsRequest( + model_monitor="model_monitor_value", + ) + + # Make the request + page_result = client.search_model_monitoring_stats(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_SearchModelMonitoringStats_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_stats_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_stats_sync.py new file mode 100644 index 0000000000..754e6099d9 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_stats_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SearchModelMonitoringStats +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_SearchModelMonitoringStats_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_search_model_monitoring_stats(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.SearchModelMonitoringStatsRequest( + model_monitor="model_monitor_value", + ) + + # Make the request + page_result = client.search_model_monitoring_stats(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_SearchModelMonitoringStats_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_update_model_monitor_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_update_model_monitor_async.py new file mode 100644 index 0000000000..ac17380751 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_update_model_monitor_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModelMonitor +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_UpdateModelMonitor_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_model_monitor(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateModelMonitorRequest( + ) + + # Make the request + operation = client.update_model_monitor(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_UpdateModelMonitor_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_update_model_monitor_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_update_model_monitor_sync.py new file mode 100644 index 0000000000..d363c07ac3 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_model_monitoring_service_update_model_monitor_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateModelMonitor +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ModelMonitoringService_UpdateModelMonitor_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_update_model_monitor(): + # Create a client + client = aiplatform_v1beta1.ModelMonitoringServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpdateModelMonitorRequest( + ) + + # Make the request + operation = client.update_model_monitor(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ModelMonitoringService_UpdateModelMonitor_sync] diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json index acf1a729bb..d74a00a616 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json @@ -29235,6 +29235,1801 @@ ], "title": "aiplatform_v1beta1_generated_model_garden_service_list_publisher_models_sync.py" }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient", + "shortName": "ModelMonitoringServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient.create_model_monitor", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.CreateModelMonitor", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "CreateModelMonitor" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateModelMonitorRequest" + }, + { + "name": "parent", + "type": 
"str" + }, + { + "name": "model_monitor", + "type": "google.cloud.aiplatform_v1beta1.types.ModelMonitor" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_model_monitor" + }, + "description": "Sample for CreateModelMonitor", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitor_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_CreateModelMonitor_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitor_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient", + "shortName": "ModelMonitoringServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient.create_model_monitor", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.CreateModelMonitor", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "CreateModelMonitor" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateModelMonitorRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_monitor", + "type": 
"google.cloud.aiplatform_v1beta1.types.ModelMonitor" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_model_monitor" + }, + "description": "Sample for CreateModelMonitor", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitor_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_CreateModelMonitor_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitor_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient", + "shortName": "ModelMonitoringServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient.create_model_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.CreateModelMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "CreateModelMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateModelMonitoringJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_monitoring_job", + "type": 
"google.cloud.aiplatform_v1beta1.types.ModelMonitoringJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelMonitoringJob", + "shortName": "create_model_monitoring_job" + }, + "description": "Sample for CreateModelMonitoringJob", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_CreateModelMonitoringJob_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitoring_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient", + "shortName": "ModelMonitoringServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient.create_model_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.CreateModelMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "CreateModelMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateModelMonitoringJobRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": 
"model_monitoring_job", + "type": "google.cloud.aiplatform_v1beta1.types.ModelMonitoringJob" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelMonitoringJob", + "shortName": "create_model_monitoring_job" + }, + "description": "Sample for CreateModelMonitoringJob", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_CreateModelMonitoringJob_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_create_model_monitoring_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient", + "shortName": "ModelMonitoringServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient.delete_model_monitor", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.DeleteModelMonitor", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "DeleteModelMonitor" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelMonitorRequest" + }, + { + "name": "name", + "type": "str" + 
}, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model_monitor" + }, + "description": "Sample for DeleteModelMonitor", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitor_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_DeleteModelMonitor_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitor_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient", + "shortName": "ModelMonitoringServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient.delete_model_monitor", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.DeleteModelMonitor", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "DeleteModelMonitor" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelMonitorRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": 
"Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model_monitor" + }, + "description": "Sample for DeleteModelMonitor", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitor_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_DeleteModelMonitor_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitor_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient", + "shortName": "ModelMonitoringServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient.delete_model_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.DeleteModelMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "DeleteModelMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": 
"delete_model_monitoring_job" + }, + "description": "Sample for DeleteModelMonitoringJob", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_DeleteModelMonitoringJob_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitoring_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient", + "shortName": "ModelMonitoringServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient.delete_model_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.DeleteModelMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "DeleteModelMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model_monitoring_job" + }, + "description": "Sample for DeleteModelMonitoringJob", + "file": 
"aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_DeleteModelMonitoringJob_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_delete_model_monitoring_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient", + "shortName": "ModelMonitoringServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient.get_model_monitor", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.GetModelMonitor", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "GetModelMonitor" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelMonitorRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelMonitor", + "shortName": "get_model_monitor" + }, + "description": "Sample for GetModelMonitor", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitor_async.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_GetModelMonitor_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitor_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient", + "shortName": "ModelMonitoringServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient.get_model_monitor", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.GetModelMonitor", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "GetModelMonitor" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelMonitorRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelMonitor", + "shortName": "get_model_monitor" + }, + "description": "Sample for GetModelMonitor", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitor_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_GetModelMonitor_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + 
"end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitor_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient", + "shortName": "ModelMonitoringServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient.get_model_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.GetModelMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "GetModelMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelMonitoringJob", + "shortName": "get_model_monitoring_job" + }, + "description": "Sample for GetModelMonitoringJob", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitoring_job_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_GetModelMonitoringJob_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": 
"CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitoring_job_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient", + "shortName": "ModelMonitoringServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient.get_model_monitoring_job", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.GetModelMonitoringJob", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "GetModelMonitoringJob" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetModelMonitoringJobRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelMonitoringJob", + "shortName": "get_model_monitoring_job" + }, + "description": "Sample for GetModelMonitoringJob", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitoring_job_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_GetModelMonitoringJob_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + 
"start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_get_model_monitoring_job_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient", + "shortName": "ModelMonitoringServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient.list_model_monitoring_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitoringJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "ListModelMonitoringJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelMonitoringJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_monitoring_service.pagers.ListModelMonitoringJobsAsyncPager", + "shortName": "list_model_monitoring_jobs" + }, + "description": "Sample for ListModelMonitoringJobs", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitoring_jobs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_ListModelMonitoringJobs_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": 
"REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitoring_jobs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient", + "shortName": "ModelMonitoringServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient.list_model_monitoring_jobs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitoringJobs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "ListModelMonitoringJobs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelMonitoringJobsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_monitoring_service.pagers.ListModelMonitoringJobsPager", + "shortName": "list_model_monitoring_jobs" + }, + "description": "Sample for ListModelMonitoringJobs", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitoring_jobs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_ListModelMonitoringJobs_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + 
"type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitoring_jobs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient", + "shortName": "ModelMonitoringServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient.list_model_monitors", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitors", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "ListModelMonitors" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelMonitorsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_monitoring_service.pagers.ListModelMonitorsAsyncPager", + "shortName": "list_model_monitors" + }, + "description": "Sample for ListModelMonitors", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitors_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_ListModelMonitors_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitors_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient", + "shortName": "ModelMonitoringServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient.list_model_monitors", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.ListModelMonitors", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "ListModelMonitors" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListModelMonitorsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_monitoring_service.pagers.ListModelMonitorsPager", + "shortName": "list_model_monitors" + }, + "description": "Sample for ListModelMonitors", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitors_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_ListModelMonitors_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_list_model_monitors_sync.py" + }, + { + "canonical": true, + 
"clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient", + "shortName": "ModelMonitoringServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient.search_model_monitoring_alerts", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringAlerts", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "SearchModelMonitoringAlerts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringAlertsRequest" + }, + { + "name": "model_monitor", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_monitoring_service.pagers.SearchModelMonitoringAlertsAsyncPager", + "shortName": "search_model_monitoring_alerts" + }, + "description": "Sample for SearchModelMonitoringAlerts", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_alerts_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_SearchModelMonitoringAlerts_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_alerts_async.py" + }, + { 
+ "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient", + "shortName": "ModelMonitoringServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient.search_model_monitoring_alerts", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringAlerts", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "SearchModelMonitoringAlerts" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringAlertsRequest" + }, + { + "name": "model_monitor", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_monitoring_service.pagers.SearchModelMonitoringAlertsPager", + "shortName": "search_model_monitoring_alerts" + }, + "description": "Sample for SearchModelMonitoringAlerts", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_alerts_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_SearchModelMonitoringAlerts_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_alerts_sync.py" + }, + { + "canonical": 
true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient", + "shortName": "ModelMonitoringServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient.search_model_monitoring_stats", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringStats", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "SearchModelMonitoringStats" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringStatsRequest" + }, + { + "name": "model_monitor", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_monitoring_service.pagers.SearchModelMonitoringStatsAsyncPager", + "shortName": "search_model_monitoring_stats" + }, + "description": "Sample for SearchModelMonitoringStats", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_stats_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_SearchModelMonitoringStats_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_stats_async.py" + }, + { + 
"canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient", + "shortName": "ModelMonitoringServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient.search_model_monitoring_stats", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.SearchModelMonitoringStats", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "SearchModelMonitoringStats" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchModelMonitoringStatsRequest" + }, + { + "name": "model_monitor", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.model_monitoring_service.pagers.SearchModelMonitoringStatsPager", + "shortName": "search_model_monitoring_stats" + }, + "description": "Sample for SearchModelMonitoringStats", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_stats_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_SearchModelMonitoringStats_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_search_model_monitoring_stats_sync.py" + }, + { + "canonical": true, + 
"clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient", + "shortName": "ModelMonitoringServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceAsyncClient.update_model_monitor", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.UpdateModelMonitor", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "UpdateModelMonitor" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateModelMonitorRequest" + }, + { + "name": "model_monitor", + "type": "google.cloud.aiplatform_v1beta1.types.ModelMonitor" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_model_monitor" + }, + "description": "Sample for UpdateModelMonitor", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_update_model_monitor_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_UpdateModelMonitor_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_update_model_monitor_async.py" + }, + { + "canonical": 
true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient", + "shortName": "ModelMonitoringServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.ModelMonitoringServiceClient.update_model_monitor", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService.UpdateModelMonitor", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.ModelMonitoringService", + "shortName": "ModelMonitoringService" + }, + "shortName": "UpdateModelMonitor" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateModelMonitorRequest" + }, + { + "name": "model_monitor", + "type": "google.cloud.aiplatform_v1beta1.types.ModelMonitor" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_model_monitor" + }, + "description": "Sample for UpdateModelMonitor", + "file": "aiplatform_v1beta1_generated_model_monitoring_service_update_model_monitor_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_ModelMonitoringService_UpdateModelMonitor_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_model_monitoring_service_update_model_monitor_sync.py" + }, { "canonical": true, "clientMethod": { diff --git 
a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py index 2440168e48..824b17f1b1 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py @@ -3257,22 +3257,19 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", + "project": "winkle", + "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) @@ -3282,19 +3279,22 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format( + project = "scallop" + location = "abalone" + dataset = "squid" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "whelk", + "project": "clam", + "location": "whelk", "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_model_monitoring_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_model_monitoring_service.py new file mode 100644 index 
0000000000..0ec0c963a9 --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1beta1/test_model_monitoring_service.py @@ -0,0 +1,12620 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json +import math +import pytest +from google.api_core import api_core_version +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import 
MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.model_monitoring_service import ( + ModelMonitoringServiceAsyncClient, +) +from google.cloud.aiplatform_v1beta1.services.model_monitoring_service import ( + ModelMonitoringServiceClient, +) +from google.cloud.aiplatform_v1beta1.services.model_monitoring_service import pagers +from google.cloud.aiplatform_v1beta1.services.model_monitoring_service import transports +from google.cloud.aiplatform_v1beta1.types import accelerator_type +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import explanation_metadata +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import job_state +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import model_monitor +from google.cloud.aiplatform_v1beta1.types import model_monitor as gca_model_monitor +from google.cloud.aiplatform_v1beta1.types import model_monitoring_alert +from google.cloud.aiplatform_v1beta1.types import model_monitoring_job +from google.cloud.aiplatform_v1beta1.types import ( + model_monitoring_job as gca_model_monitoring_job, +) +from google.cloud.aiplatform_v1beta1.types import model_monitoring_service +from google.cloud.aiplatform_v1beta1.types import model_monitoring_spec +from google.cloud.aiplatform_v1beta1.types import model_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import any_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # 
type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import interval_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +# If default endpoint template is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ModelMonitoringServiceClient._get_default_mtls_endpoint(None) is None + assert ( + ModelMonitoringServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + ModelMonitoringServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + ModelMonitoringServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ModelMonitoringServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + 
ModelMonitoringServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +def test__read_environment_variables(): + assert ModelMonitoringServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert ModelMonitoringServiceClient._read_environment_variables() == ( + True, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert ModelMonitoringServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + ModelMonitoringServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert ModelMonitoringServiceClient._read_environment_variables() == ( + False, + "never", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert ModelMonitoringServiceClient._read_environment_variables() == ( + False, + "always", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert ModelMonitoringServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + ModelMonitoringServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert 
ModelMonitoringServiceClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert ModelMonitoringServiceClient._get_client_cert_source(None, False) is None + assert ( + ModelMonitoringServiceClient._get_client_cert_source( + mock_provided_cert_source, False + ) + is None + ) + assert ( + ModelMonitoringServiceClient._get_client_cert_source( + mock_provided_cert_source, True + ) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + ModelMonitoringServiceClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + ModelMonitoringServiceClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is mock_provided_cert_source + ) + + +@mock.patch.object( + ModelMonitoringServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ModelMonitoringServiceClient), +) +@mock.patch.object( + ModelMonitoringServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ModelMonitoringServiceAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = ModelMonitoringServiceClient._DEFAULT_UNIVERSE + default_endpoint = ModelMonitoringServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = ModelMonitoringServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + ModelMonitoringServiceClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + 
ModelMonitoringServiceClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == ModelMonitoringServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + ModelMonitoringServiceClient._get_api_endpoint( + None, None, default_universe, "auto" + ) + == default_endpoint + ) + assert ( + ModelMonitoringServiceClient._get_api_endpoint( + None, None, default_universe, "always" + ) + == ModelMonitoringServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + ModelMonitoringServiceClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == ModelMonitoringServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + ModelMonitoringServiceClient._get_api_endpoint( + None, None, mock_universe, "never" + ) + == mock_endpoint + ) + assert ( + ModelMonitoringServiceClient._get_api_endpoint( + None, None, default_universe, "never" + ) + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + ModelMonitoringServiceClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." + ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + ModelMonitoringServiceClient._get_universe_domain( + client_universe_domain, universe_domain_env + ) + == client_universe_domain + ) + assert ( + ModelMonitoringServiceClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + ModelMonitoringServiceClient._get_universe_domain(None, None) + == ModelMonitoringServiceClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + ModelMonitoringServiceClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." 
+ + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + ModelMonitoringServiceClient, + transports.ModelMonitoringServiceGrpcTransport, + "grpc", + ), + ( + ModelMonitoringServiceClient, + transports.ModelMonitoringServiceRestTransport, + "rest", + ), + ], +) +def test__validate_universe_domain(client_class, transport_class, transport_name): + client = client_class( + transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) + ) + assert client._validate_universe_domain() == True + + # Test the case when universe is already validated. + assert client._validate_universe_domain() == True + + if transport_name == "grpc": + # Test the case where credentials are provided by the + # `local_channel_credentials`. The default universes in both match. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + client = client_class(transport=transport_class(channel=channel)) + assert client._validate_universe_domain() == True + + # Test the case where credentials do not exist: e.g. a transport is provided + # with no credentials. Validation should still succeed because there is no + # mismatch with non-existent credentials. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + transport = transport_class(channel=channel) + transport._credentials = None + client = client_class(transport=transport) + assert client._validate_universe_domain() == True + + # TODO: This is needed to cater for older versions of google-auth + # Make this test unconditional once the minimum supported version of + # google-auth becomes 2.23.0 or higher. 
+ google_auth_major, google_auth_minor = [ + int(part) for part in google.auth.__version__.split(".")[0:2] + ] + if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): + credentials = ga_credentials.AnonymousCredentials() + credentials._universe_domain = "foo.com" + # Test the case when there is a universe mismatch from the credentials. + client = client_class(transport=transport_class(credentials=credentials)) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test the case when there is a universe mismatch from the client. + # + # TODO: Make this test unconditional once the minimum supported version of + # google-api-core becomes 2.15.0 or higher. + api_core_major, api_core_minor = [ + int(part) for part in api_core_version.__version__.split(".")[0:2] + ] + if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): + client = client_class( + client_options={"universe_domain": "bar.com"}, + transport=transport_class( + credentials=ga_credentials.AnonymousCredentials(), + ), + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." 
+ ) + + # Test that ValueError is raised if universe_domain is provided via client options and credentials is None + with pytest.raises(ValueError): + client._compare_universes("foo.bar", None) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (ModelMonitoringServiceClient, "grpc"), + (ModelMonitoringServiceAsyncClient, "grpc_asyncio"), + (ModelMonitoringServiceClient, "rest"), + ], +) +def test_model_monitoring_service_client_from_service_account_info( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.ModelMonitoringServiceGrpcTransport, "grpc"), + (transports.ModelMonitoringServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.ModelMonitoringServiceRestTransport, "rest"), + ], +) +def test_model_monitoring_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + 
use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (ModelMonitoringServiceClient, "grpc"), + (ModelMonitoringServiceAsyncClient, "grpc_asyncio"), + (ModelMonitoringServiceClient, "rest"), + ], +) +def test_model_monitoring_service_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +def test_model_monitoring_service_client_get_transport_class(): + transport = ModelMonitoringServiceClient.get_transport_class() + available_transports = [ + transports.ModelMonitoringServiceGrpcTransport, + transports.ModelMonitoringServiceRestTransport, + ] + assert transport in available_transports + + transport = ModelMonitoringServiceClient.get_transport_class("grpc") + assert transport == transports.ModelMonitoringServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + ModelMonitoringServiceClient, + transports.ModelMonitoringServiceGrpcTransport, + "grpc", + ), + ( + ModelMonitoringServiceAsyncClient, + transports.ModelMonitoringServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ( + ModelMonitoringServiceClient, + transports.ModelMonitoringServiceRestTransport, + "rest", + ), + ], +) +@mock.patch.object( + 
ModelMonitoringServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ModelMonitoringServiceClient), +) +@mock.patch.object( + ModelMonitoringServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ModelMonitoringServiceAsyncClient), +) +def test_model_monitoring_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ModelMonitoringServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ModelMonitoringServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + ModelMonitoringServiceClient, + transports.ModelMonitoringServiceGrpcTransport, + "grpc", + "true", + ), + ( + ModelMonitoringServiceAsyncClient, + transports.ModelMonitoringServiceGrpcAsyncIOTransport, + 
"grpc_asyncio", + "true", + ), + ( + ModelMonitoringServiceClient, + transports.ModelMonitoringServiceGrpcTransport, + "grpc", + "false", + ), + ( + ModelMonitoringServiceAsyncClient, + transports.ModelMonitoringServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ( + ModelMonitoringServiceClient, + transports.ModelMonitoringServiceRestTransport, + "rest", + "true", + ), + ( + ModelMonitoringServiceClient, + transports.ModelMonitoringServiceRestTransport, + "rest", + "false", + ), + ], +) +@mock.patch.object( + ModelMonitoringServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ModelMonitoringServiceClient), +) +@mock.patch.object( + ModelMonitoringServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ModelMonitoringServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_model_monitoring_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", [ModelMonitoringServiceClient, ModelMonitoringServiceAsyncClient] +) +@mock.patch.object( + ModelMonitoringServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ModelMonitoringServiceClient), +) +@mock.patch.object( + ModelMonitoringServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ModelMonitoringServiceAsyncClient), +) +def test_model_monitoring_service_client_get_mtls_endpoint_and_cert_source( + client_class, +): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + +@pytest.mark.parametrize( + "client_class", [ModelMonitoringServiceClient, ModelMonitoringServiceAsyncClient] +) +@mock.patch.object( + ModelMonitoringServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ModelMonitoringServiceClient), +) +@mock.patch.object( + ModelMonitoringServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ModelMonitoringServiceAsyncClient), +) +def test_model_monitoring_service_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = ModelMonitoringServiceClient._DEFAULT_UNIVERSE + default_endpoint = ModelMonitoringServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = ModelMonitoringServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. 
+ options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + ModelMonitoringServiceClient, + transports.ModelMonitoringServiceGrpcTransport, + "grpc", + ), + ( + ModelMonitoringServiceAsyncClient, + transports.ModelMonitoringServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ( + ModelMonitoringServiceClient, + transports.ModelMonitoringServiceRestTransport, + "rest", + ), + ], +) +def test_model_monitoring_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + ModelMonitoringServiceClient, + transports.ModelMonitoringServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + ModelMonitoringServiceAsyncClient, + transports.ModelMonitoringServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ( + ModelMonitoringServiceClient, + transports.ModelMonitoringServiceRestTransport, + "rest", + None, + ), + ], +) +def test_model_monitoring_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_model_monitoring_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.model_monitoring_service.transports.ModelMonitoringServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = ModelMonitoringServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + ModelMonitoringServiceClient, + transports.ModelMonitoringServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + ModelMonitoringServiceAsyncClient, + transports.ModelMonitoringServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_model_monitoring_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.CreateModelMonitorRequest, + dict, + ], +) +def test_create_model_monitor(request_type, transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is 
concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_model_monitor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.CreateModelMonitorRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_model_monitor_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_monitor), "__call__" + ) as call: + client.create_model_monitor() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.CreateModelMonitorRequest() + + +def test_create_model_monitor_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = model_monitoring_service.CreateModelMonitorRequest( + parent="parent_value", + model_monitor_id="model_monitor_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_monitor), "__call__" + ) as call: + client.create_model_monitor(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.CreateModelMonitorRequest( + parent="parent_value", + model_monitor_id="model_monitor_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_model_monitor_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_model_monitor() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.CreateModelMonitorRequest() + + +@pytest.mark.asyncio +async def test_create_model_monitor_async( + transport: str = "grpc_asyncio", + request_type=model_monitoring_service.CreateModelMonitorRequest, +): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_model_monitor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.CreateModelMonitorRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_model_monitor_async_from_dict(): + await test_create_model_monitor_async(request_type=dict) + + +def test_create_model_monitor_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_monitoring_service.CreateModelMonitorRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_monitor), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_model_monitor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_model_monitor_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_monitoring_service.CreateModelMonitorRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_monitor), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_model_monitor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_model_monitor_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.create_model_monitor( + parent="parent_value", + model_monitor=gca_model_monitor.ModelMonitor( + tabular_objective=model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective( + feature_drift_spec=model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec( + features=["features_value"] + ) + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].model_monitor + mock_val = gca_model_monitor.ModelMonitor( + tabular_objective=model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective( + feature_drift_spec=model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec( + features=["features_value"] + ) + ) + ) + assert arg == mock_val + + +def test_create_model_monitor_flattened_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_model_monitor( + model_monitoring_service.CreateModelMonitorRequest(), + parent="parent_value", + model_monitor=gca_model_monitor.ModelMonitor( + tabular_objective=model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective( + feature_drift_spec=model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec( + features=["features_value"] + ) + ) + ), + ) + + +@pytest.mark.asyncio +async def test_create_model_monitor_flattened_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_model_monitor( + parent="parent_value", + model_monitor=gca_model_monitor.ModelMonitor( + tabular_objective=model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective( + feature_drift_spec=model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec( + features=["features_value"] + ) + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].model_monitor + mock_val = gca_model_monitor.ModelMonitor( + tabular_objective=model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective( + feature_drift_spec=model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec( + features=["features_value"] + ) + ) + ) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_model_monitor_flattened_error_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_model_monitor( + model_monitoring_service.CreateModelMonitorRequest(), + parent="parent_value", + model_monitor=gca_model_monitor.ModelMonitor( + tabular_objective=model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective( + feature_drift_spec=model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec( + features=["features_value"] + ) + ) + ), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.UpdateModelMonitorRequest, + dict, + ], +) +def test_update_model_monitor(request_type, transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.update_model_monitor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.UpdateModelMonitorRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_model_monitor_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_model_monitor), "__call__" + ) as call: + client.update_model_monitor() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.UpdateModelMonitorRequest() + + +def test_update_model_monitor_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = model_monitoring_service.UpdateModelMonitorRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_monitor), "__call__" + ) as call: + client.update_model_monitor(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.UpdateModelMonitorRequest() + + +@pytest.mark.asyncio +async def test_update_model_monitor_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_model_monitor() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.UpdateModelMonitorRequest() + + +@pytest.mark.asyncio +async def test_update_model_monitor_async( + transport: str = "grpc_asyncio", + request_type=model_monitoring_service.UpdateModelMonitorRequest, +): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_model_monitor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.UpdateModelMonitorRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_model_monitor_async_from_dict(): + await test_update_model_monitor_async(request_type=dict) + + +def test_update_model_monitor_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = model_monitoring_service.UpdateModelMonitorRequest() + + request.model_monitor.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_monitor), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_model_monitor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model_monitor.name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_model_monitor_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_monitoring_service.UpdateModelMonitorRequest() + + request.model_monitor.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_monitor), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.update_model_monitor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model_monitor.name=name_value", + ) in kw["metadata"] + + +def test_update_model_monitor_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_model_monitor( + model_monitor=gca_model_monitor.ModelMonitor( + tabular_objective=model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective( + feature_drift_spec=model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec( + features=["features_value"] + ) + ) + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model_monitor + mock_val = gca_model_monitor.ModelMonitor( + tabular_objective=model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective( + feature_drift_spec=model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec( + features=["features_value"] + ) + ) + ) + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +def test_update_model_monitor_flattened_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_model_monitor( + model_monitoring_service.UpdateModelMonitorRequest(), + model_monitor=gca_model_monitor.ModelMonitor( + tabular_objective=model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective( + feature_drift_spec=model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec( + features=["features_value"] + ) + ) + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_model_monitor_flattened_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_model_monitor( + model_monitor=gca_model_monitor.ModelMonitor( + tabular_objective=model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective( + feature_drift_spec=model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec( + features=["features_value"] + ) + ) + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model_monitor + mock_val = gca_model_monitor.ModelMonitor( + tabular_objective=model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective( + feature_drift_spec=model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec( + features=["features_value"] + ) + ) + ) + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_update_model_monitor_flattened_error_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_model_monitor( + model_monitoring_service.UpdateModelMonitorRequest(), + model_monitor=gca_model_monitor.ModelMonitor( + tabular_objective=model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective( + feature_drift_spec=model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec( + features=["features_value"] + ) + ) + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.GetModelMonitorRequest, + dict, + ], +) +def test_get_model_monitor(request_type, transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = model_monitor.ModelMonitor( + name="name_value", + display_name="display_name_value", + ) + response = client.get_model_monitor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.GetModelMonitorRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, model_monitor.ModelMonitor) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +def test_get_model_monitor_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_monitor), "__call__" + ) as call: + client.get_model_monitor() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.GetModelMonitorRequest() + + +def test_get_model_monitor_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = model_monitoring_service.GetModelMonitorRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_model_monitor), "__call__" + ) as call: + client.get_model_monitor(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.GetModelMonitorRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_model_monitor_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitor.ModelMonitor( + name="name_value", + display_name="display_name_value", + ) + ) + response = await client.get_model_monitor() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.GetModelMonitorRequest() + + +@pytest.mark.asyncio +async def test_get_model_monitor_async( + transport: str = "grpc_asyncio", + request_type=model_monitoring_service.GetModelMonitorRequest, +): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitor.ModelMonitor( + name="name_value", + display_name="display_name_value", + ) + ) + response = await client.get_model_monitor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.GetModelMonitorRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, model_monitor.ModelMonitor) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +@pytest.mark.asyncio +async def test_get_model_monitor_async_from_dict(): + await test_get_model_monitor_async(request_type=dict) + + +def test_get_model_monitor_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_monitoring_service.GetModelMonitorRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_monitor), "__call__" + ) as call: + call.return_value = model_monitor.ModelMonitor() + client.get_model_monitor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_model_monitor_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = model_monitoring_service.GetModelMonitorRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_monitor), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitor.ModelMonitor() + ) + await client.get_model_monitor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_model_monitor_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_monitor.ModelMonitor() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model_monitor( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_model_monitor_flattened_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_model_monitor( + model_monitoring_service.GetModelMonitorRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_model_monitor_flattened_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_monitor.ModelMonitor() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitor.ModelMonitor() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model_monitor( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_model_monitor_flattened_error_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_model_monitor( + model_monitoring_service.GetModelMonitorRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.ListModelMonitorsRequest, + dict, + ], +) +def test_list_model_monitors(request_type, transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitors), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_monitoring_service.ListModelMonitorsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_model_monitors(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.ListModelMonitorsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelMonitorsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_model_monitors_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_monitors), "__call__" + ) as call: + client.list_model_monitors() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.ListModelMonitorsRequest() + + +def test_list_model_monitors_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = model_monitoring_service.ListModelMonitorsRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitors), "__call__" + ) as call: + client.list_model_monitors(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.ListModelMonitorsRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + ) + + +@pytest.mark.asyncio +async def test_list_model_monitors_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitors), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_service.ListModelMonitorsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_model_monitors() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.ListModelMonitorsRequest() + + +@pytest.mark.asyncio +async def test_list_model_monitors_async( + transport: str = "grpc_asyncio", + request_type=model_monitoring_service.ListModelMonitorsRequest, +): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitors), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_service.ListModelMonitorsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_model_monitors(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.ListModelMonitorsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListModelMonitorsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_model_monitors_async_from_dict(): + await test_list_model_monitors_async(request_type=dict) + + +def test_list_model_monitors_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_monitoring_service.ListModelMonitorsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitors), "__call__" + ) as call: + call.return_value = model_monitoring_service.ListModelMonitorsResponse() + client.list_model_monitors(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_model_monitors_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_monitoring_service.ListModelMonitorsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_monitors), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_service.ListModelMonitorsResponse() + ) + await client.list_model_monitors(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_model_monitors_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitors), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_monitoring_service.ListModelMonitorsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_model_monitors( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_model_monitors_flattened_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_model_monitors( + model_monitoring_service.ListModelMonitorsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_model_monitors_flattened_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitors), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_monitoring_service.ListModelMonitorsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_service.ListModelMonitorsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_model_monitors( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_model_monitors_flattened_error_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_model_monitors( + model_monitoring_service.ListModelMonitorsRequest(), + parent="parent_value", + ) + + +def test_list_model_monitors_pager(transport_name: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_monitors), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[ + model_monitor.ModelMonitor(), + model_monitor.ModelMonitor(), + model_monitor.ModelMonitor(), + ], + next_page_token="abc", + ), + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[], + next_page_token="def", + ), + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[ + model_monitor.ModelMonitor(), + ], + next_page_token="ghi", + ), + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[ + model_monitor.ModelMonitor(), + model_monitor.ModelMonitor(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_model_monitors(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model_monitor.ModelMonitor) for i in results) + + +def test_list_model_monitors_pages(transport_name: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitors), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[ + model_monitor.ModelMonitor(), + model_monitor.ModelMonitor(), + model_monitor.ModelMonitor(), + ], + next_page_token="abc", + ), + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[], + next_page_token="def", + ), + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[ + model_monitor.ModelMonitor(), + ], + next_page_token="ghi", + ), + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[ + model_monitor.ModelMonitor(), + model_monitor.ModelMonitor(), + ], + ), + RuntimeError, + ) + pages = list(client.list_model_monitors(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_model_monitors_async_pager(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitors), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[ + model_monitor.ModelMonitor(), + model_monitor.ModelMonitor(), + model_monitor.ModelMonitor(), + ], + next_page_token="abc", + ), + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[], + next_page_token="def", + ), + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[ + model_monitor.ModelMonitor(), + ], + next_page_token="ghi", + ), + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[ + model_monitor.ModelMonitor(), + model_monitor.ModelMonitor(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_model_monitors( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model_monitor.ModelMonitor) for i in responses) + + +@pytest.mark.asyncio +async def test_list_model_monitors_async_pages(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitors), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[ + model_monitor.ModelMonitor(), + model_monitor.ModelMonitor(), + model_monitor.ModelMonitor(), + ], + next_page_token="abc", + ), + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[], + next_page_token="def", + ), + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[ + model_monitor.ModelMonitor(), + ], + next_page_token="ghi", + ), + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[ + model_monitor.ModelMonitor(), + model_monitor.ModelMonitor(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_model_monitors(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.DeleteModelMonitorRequest, + dict, + ], +) +def test_delete_model_monitor(request_type, transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_model_monitor(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.DeleteModelMonitorRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_monitor_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_monitor), "__call__" + ) as call: + client.delete_model_monitor() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.DeleteModelMonitorRequest() + + +def test_delete_model_monitor_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = model_monitoring_service.DeleteModelMonitorRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_model_monitor), "__call__" + ) as call: + client.delete_model_monitor(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.DeleteModelMonitorRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_model_monitor_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_model_monitor() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.DeleteModelMonitorRequest() + + +@pytest.mark.asyncio +async def test_delete_model_monitor_async( + transport: str = "grpc_asyncio", + request_type=model_monitoring_service.DeleteModelMonitorRequest, +): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_model_monitor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.DeleteModelMonitorRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_model_monitor_async_from_dict(): + await test_delete_model_monitor_async(request_type=dict) + + +def test_delete_model_monitor_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_monitoring_service.DeleteModelMonitorRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_monitor), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_model_monitor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_model_monitor_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = model_monitoring_service.DeleteModelMonitorRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_monitor), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_model_monitor(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_model_monitor_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_model_monitor( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_model_monitor_flattened_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_model_monitor( + model_monitoring_service.DeleteModelMonitorRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_model_monitor_flattened_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_monitor), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_model_monitor( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_model_monitor_flattened_error_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_model_monitor( + model_monitoring_service.DeleteModelMonitorRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.CreateModelMonitoringJobRequest, + dict, + ], +) +def test_create_model_monitoring_job(request_type, transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_monitoring_job.ModelMonitoringJob( + name="name_value", + display_name="display_name_value", + state=job_state.JobState.JOB_STATE_QUEUED, + schedule="schedule_value", + ) + response = client.create_model_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.CreateModelMonitoringJobRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model_monitoring_job.ModelMonitoringJob) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.schedule == "schedule_value" + + +def test_create_model_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_monitoring_job), "__call__" + ) as call: + client.create_model_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.CreateModelMonitoringJobRequest() + + +def test_create_model_monitoring_job_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = model_monitoring_service.CreateModelMonitoringJobRequest( + parent="parent_value", + model_monitoring_job_id="model_monitoring_job_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_monitoring_job), "__call__" + ) as call: + client.create_model_monitoring_job(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.CreateModelMonitoringJobRequest( + parent="parent_value", + model_monitoring_job_id="model_monitoring_job_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_model_monitoring_job_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_model_monitoring_job.ModelMonitoringJob( + name="name_value", + display_name="display_name_value", + state=job_state.JobState.JOB_STATE_QUEUED, + schedule="schedule_value", + ) + ) + response = await client.create_model_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.CreateModelMonitoringJobRequest() + + +@pytest.mark.asyncio +async def test_create_model_monitoring_job_async( + transport: str = "grpc_asyncio", + request_type=model_monitoring_service.CreateModelMonitoringJobRequest, +): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_model_monitoring_job.ModelMonitoringJob( + name="name_value", + display_name="display_name_value", + state=job_state.JobState.JOB_STATE_QUEUED, + schedule="schedule_value", + ) + ) + response = await client.create_model_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.CreateModelMonitoringJobRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model_monitoring_job.ModelMonitoringJob) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.schedule == "schedule_value" + + +@pytest.mark.asyncio +async def test_create_model_monitoring_job_async_from_dict(): + await test_create_model_monitoring_job_async(request_type=dict) + + +def test_create_model_monitoring_job_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_monitoring_service.CreateModelMonitoringJobRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_monitoring_job), "__call__" + ) as call: + call.return_value = gca_model_monitoring_job.ModelMonitoringJob() + client.create_model_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_model_monitoring_job_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = model_monitoring_service.CreateModelMonitoringJobRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_monitoring_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_model_monitoring_job.ModelMonitoringJob() + ) + await client.create_model_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_model_monitoring_job_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_monitoring_job.ModelMonitoringJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_model_monitoring_job( + parent="parent_value", + model_monitoring_job=gca_model_monitoring_job.ModelMonitoringJob( + name="name_value" + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].model_monitoring_job + mock_val = gca_model_monitoring_job.ModelMonitoringJob(name="name_value") + assert arg == mock_val + + +def test_create_model_monitoring_job_flattened_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_model_monitoring_job( + model_monitoring_service.CreateModelMonitoringJobRequest(), + parent="parent_value", + model_monitoring_job=gca_model_monitoring_job.ModelMonitoringJob( + name="name_value" + ), + ) + + +@pytest.mark.asyncio +async def test_create_model_monitoring_job_flattened_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_model_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model_monitoring_job.ModelMonitoringJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_model_monitoring_job.ModelMonitoringJob() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_model_monitoring_job( + parent="parent_value", + model_monitoring_job=gca_model_monitoring_job.ModelMonitoringJob( + name="name_value" + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].model_monitoring_job + mock_val = gca_model_monitoring_job.ModelMonitoringJob(name="name_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_model_monitoring_job_flattened_error_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_model_monitoring_job( + model_monitoring_service.CreateModelMonitoringJobRequest(), + parent="parent_value", + model_monitoring_job=gca_model_monitoring_job.ModelMonitoringJob( + name="name_value" + ), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.GetModelMonitoringJobRequest, + dict, + ], +) +def test_get_model_monitoring_job(request_type, transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_monitoring_job.ModelMonitoringJob( + name="name_value", + display_name="display_name_value", + state=job_state.JobState.JOB_STATE_QUEUED, + schedule="schedule_value", + ) + response = client.get_model_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.GetModelMonitoringJobRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, model_monitoring_job.ModelMonitoringJob) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.schedule == "schedule_value" + + +def test_get_model_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_monitoring_job), "__call__" + ) as call: + client.get_model_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.GetModelMonitoringJobRequest() + + +def test_get_model_monitoring_job_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = model_monitoring_service.GetModelMonitoringJobRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_model_monitoring_job), "__call__" + ) as call: + client.get_model_monitoring_job(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.GetModelMonitoringJobRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_model_monitoring_job_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_job.ModelMonitoringJob( + name="name_value", + display_name="display_name_value", + state=job_state.JobState.JOB_STATE_QUEUED, + schedule="schedule_value", + ) + ) + response = await client.get_model_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.GetModelMonitoringJobRequest() + + +@pytest.mark.asyncio +async def test_get_model_monitoring_job_async( + transport: str = "grpc_asyncio", + request_type=model_monitoring_service.GetModelMonitoringJobRequest, +): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_model_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_job.ModelMonitoringJob( + name="name_value", + display_name="display_name_value", + state=job_state.JobState.JOB_STATE_QUEUED, + schedule="schedule_value", + ) + ) + response = await client.get_model_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.GetModelMonitoringJobRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, model_monitoring_job.ModelMonitoringJob) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.schedule == "schedule_value" + + +@pytest.mark.asyncio +async def test_get_model_monitoring_job_async_from_dict(): + await test_get_model_monitoring_job_async(request_type=dict) + + +def test_get_model_monitoring_job_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_monitoring_service.GetModelMonitoringJobRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_monitoring_job), "__call__" + ) as call: + call.return_value = model_monitoring_job.ModelMonitoringJob() + client.get_model_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_model_monitoring_job_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_monitoring_service.GetModelMonitoringJobRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_monitoring_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_job.ModelMonitoringJob() + ) + await client.get_model_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_model_monitoring_job_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_monitoring_job.ModelMonitoringJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.get_model_monitoring_job( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_model_monitoring_job_flattened_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model_monitoring_job( + model_monitoring_service.GetModelMonitoringJobRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_model_monitoring_job_flattened_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_monitoring_job.ModelMonitoringJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_job.ModelMonitoringJob() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model_monitoring_job( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_model_monitoring_job_flattened_error_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_model_monitoring_job( + model_monitoring_service.GetModelMonitoringJobRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.ListModelMonitoringJobsRequest, + dict, + ], +) +def test_list_model_monitoring_jobs(request_type, transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitoring_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_monitoring_service.ListModelMonitoringJobsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_model_monitoring_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.ListModelMonitoringJobsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListModelMonitoringJobsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_model_monitoring_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitoring_jobs), "__call__" + ) as call: + client.list_model_monitoring_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.ListModelMonitoringJobsRequest() + + +def test_list_model_monitoring_jobs_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = model_monitoring_service.ListModelMonitoringJobsRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_monitoring_jobs), "__call__" + ) as call: + client.list_model_monitoring_jobs(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.ListModelMonitoringJobsRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + ) + + +@pytest.mark.asyncio +async def test_list_model_monitoring_jobs_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitoring_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_service.ListModelMonitoringJobsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_model_monitoring_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.ListModelMonitoringJobsRequest() + + +@pytest.mark.asyncio +async def test_list_model_monitoring_jobs_async( + transport: str = "grpc_asyncio", + request_type=model_monitoring_service.ListModelMonitoringJobsRequest, +): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_monitoring_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_service.ListModelMonitoringJobsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_model_monitoring_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.ListModelMonitoringJobsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelMonitoringJobsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_model_monitoring_jobs_async_from_dict(): + await test_list_model_monitoring_jobs_async(request_type=dict) + + +def test_list_model_monitoring_jobs_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_monitoring_service.ListModelMonitoringJobsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitoring_jobs), "__call__" + ) as call: + call.return_value = model_monitoring_service.ListModelMonitoringJobsResponse() + client.list_model_monitoring_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_model_monitoring_jobs_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_monitoring_service.ListModelMonitoringJobsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitoring_jobs), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_service.ListModelMonitoringJobsResponse() + ) + await client.list_model_monitoring_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_model_monitoring_jobs_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitoring_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_monitoring_service.ListModelMonitoringJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_model_monitoring_jobs( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_model_monitoring_jobs_flattened_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_model_monitoring_jobs( + model_monitoring_service.ListModelMonitoringJobsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_model_monitoring_jobs_flattened_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitoring_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_monitoring_service.ListModelMonitoringJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_service.ListModelMonitoringJobsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_model_monitoring_jobs( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_model_monitoring_jobs_flattened_error_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_model_monitoring_jobs( + model_monitoring_service.ListModelMonitoringJobsRequest(), + parent="parent_value", + ) + + +def test_list_model_monitoring_jobs_pager(transport_name: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitoring_jobs), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[ + model_monitoring_job.ModelMonitoringJob(), + model_monitoring_job.ModelMonitoringJob(), + model_monitoring_job.ModelMonitoringJob(), + ], + next_page_token="abc", + ), + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[], + next_page_token="def", + ), + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[ + model_monitoring_job.ModelMonitoringJob(), + ], + next_page_token="ghi", + ), + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[ + model_monitoring_job.ModelMonitoringJob(), + model_monitoring_job.ModelMonitoringJob(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_model_monitoring_jobs(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, model_monitoring_job.ModelMonitoringJob) for i in results + ) + + +def test_list_model_monitoring_jobs_pages(transport_name: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_monitoring_jobs), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[ + model_monitoring_job.ModelMonitoringJob(), + model_monitoring_job.ModelMonitoringJob(), + model_monitoring_job.ModelMonitoringJob(), + ], + next_page_token="abc", + ), + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[], + next_page_token="def", + ), + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[ + model_monitoring_job.ModelMonitoringJob(), + ], + next_page_token="ghi", + ), + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[ + model_monitoring_job.ModelMonitoringJob(), + model_monitoring_job.ModelMonitoringJob(), + ], + ), + RuntimeError, + ) + pages = list(client.list_model_monitoring_jobs(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_model_monitoring_jobs_async_pager(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitoring_jobs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[ + model_monitoring_job.ModelMonitoringJob(), + model_monitoring_job.ModelMonitoringJob(), + model_monitoring_job.ModelMonitoringJob(), + ], + next_page_token="abc", + ), + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[], + next_page_token="def", + ), + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[ + model_monitoring_job.ModelMonitoringJob(), + ], + next_page_token="ghi", + ), + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[ + model_monitoring_job.ModelMonitoringJob(), + model_monitoring_job.ModelMonitoringJob(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_model_monitoring_jobs( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, model_monitoring_job.ModelMonitoringJob) for i in responses + ) + + +@pytest.mark.asyncio +async def test_list_model_monitoring_jobs_async_pages(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_monitoring_jobs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[ + model_monitoring_job.ModelMonitoringJob(), + model_monitoring_job.ModelMonitoringJob(), + model_monitoring_job.ModelMonitoringJob(), + ], + next_page_token="abc", + ), + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[], + next_page_token="def", + ), + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[ + model_monitoring_job.ModelMonitoringJob(), + ], + next_page_token="ghi", + ), + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[ + model_monitoring_job.ModelMonitoringJob(), + model_monitoring_job.ModelMonitoringJob(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_model_monitoring_jobs(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.DeleteModelMonitoringJobRequest, + dict, + ], +) +def test_delete_model_monitoring_job(request_type, transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_model_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.DeleteModelMonitoringJobRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_monitoring_job), "__call__" + ) as call: + client.delete_model_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.DeleteModelMonitoringJobRequest() + + +def test_delete_model_monitoring_job_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = model_monitoring_service.DeleteModelMonitoringJobRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_model_monitoring_job), "__call__" + ) as call: + client.delete_model_monitoring_job(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.DeleteModelMonitoringJobRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_model_monitoring_job_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_model_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.DeleteModelMonitoringJobRequest() + + +@pytest.mark.asyncio +async def test_delete_model_monitoring_job_async( + transport: str = "grpc_asyncio", + request_type=model_monitoring_service.DeleteModelMonitoringJobRequest, +): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_model_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.DeleteModelMonitoringJobRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_model_monitoring_job_async_from_dict(): + await test_delete_model_monitoring_job_async(request_type=dict) + + +def test_delete_model_monitoring_job_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_monitoring_service.DeleteModelMonitoringJobRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_monitoring_job), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_model_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_model_monitoring_job_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = model_monitoring_service.DeleteModelMonitoringJobRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_monitoring_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_model_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_model_monitoring_job_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_model_monitoring_job( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_model_monitoring_job_flattened_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_model_monitoring_job( + model_monitoring_service.DeleteModelMonitoringJobRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_model_monitoring_job_flattened_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_monitoring_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_model_monitoring_job( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_model_monitoring_job_flattened_error_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_model_monitoring_job( + model_monitoring_service.DeleteModelMonitoringJobRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.SearchModelMonitoringStatsRequest, + dict, + ], +) +def test_search_model_monitoring_stats(request_type, transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_stats), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = model_monitoring_service.SearchModelMonitoringStatsResponse( + next_page_token="next_page_token_value", + ) + response = client.search_model_monitoring_stats(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.SearchModelMonitoringStatsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.SearchModelMonitoringStatsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_search_model_monitoring_stats_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.search_model_monitoring_stats), "__call__" + ) as call: + client.search_model_monitoring_stats() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.SearchModelMonitoringStatsRequest() + + +def test_search_model_monitoring_stats_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = model_monitoring_service.SearchModelMonitoringStatsRequest( + model_monitor="model_monitor_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_stats), "__call__" + ) as call: + client.search_model_monitoring_stats(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.SearchModelMonitoringStatsRequest( + model_monitor="model_monitor_value", + page_token="page_token_value", + ) + + +@pytest.mark.asyncio +async def test_search_model_monitoring_stats_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.search_model_monitoring_stats), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_service.SearchModelMonitoringStatsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.search_model_monitoring_stats() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.SearchModelMonitoringStatsRequest() + + +@pytest.mark.asyncio +async def test_search_model_monitoring_stats_async( + transport: str = "grpc_asyncio", + request_type=model_monitoring_service.SearchModelMonitoringStatsRequest, +): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_stats), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_service.SearchModelMonitoringStatsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.search_model_monitoring_stats(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.SearchModelMonitoringStatsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.SearchModelMonitoringStatsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_search_model_monitoring_stats_async_from_dict(): + await test_search_model_monitoring_stats_async(request_type=dict) + + +def test_search_model_monitoring_stats_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_monitoring_service.SearchModelMonitoringStatsRequest() + + request.model_monitor = "model_monitor_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_stats), "__call__" + ) as call: + call.return_value = ( + model_monitoring_service.SearchModelMonitoringStatsResponse() + ) + client.search_model_monitoring_stats(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model_monitor=model_monitor_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_search_model_monitoring_stats_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_monitoring_service.SearchModelMonitoringStatsRequest() + + request.model_monitor = "model_monitor_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.search_model_monitoring_stats), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_service.SearchModelMonitoringStatsResponse() + ) + await client.search_model_monitoring_stats(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model_monitor=model_monitor_value", + ) in kw["metadata"] + + +def test_search_model_monitoring_stats_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_stats), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + model_monitoring_service.SearchModelMonitoringStatsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.search_model_monitoring_stats( + model_monitor="model_monitor_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model_monitor + mock_val = "model_monitor_value" + assert arg == mock_val + + +def test_search_model_monitoring_stats_flattened_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.search_model_monitoring_stats( + model_monitoring_service.SearchModelMonitoringStatsRequest(), + model_monitor="model_monitor_value", + ) + + +@pytest.mark.asyncio +async def test_search_model_monitoring_stats_flattened_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_stats), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + model_monitoring_service.SearchModelMonitoringStatsResponse() + ) + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_service.SearchModelMonitoringStatsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.search_model_monitoring_stats( + model_monitor="model_monitor_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model_monitor + mock_val = "model_monitor_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_search_model_monitoring_stats_flattened_error_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.search_model_monitoring_stats( + model_monitoring_service.SearchModelMonitoringStatsRequest(), + model_monitor="model_monitor_value", + ) + + +def test_search_model_monitoring_stats_pager(transport_name: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_stats), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[ + model_monitoring_stats.ModelMonitoringStats(), + model_monitoring_stats.ModelMonitoringStats(), + model_monitoring_stats.ModelMonitoringStats(), + ], + next_page_token="abc", + ), + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[], + next_page_token="def", + ), + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[ + model_monitoring_stats.ModelMonitoringStats(), + ], + next_page_token="ghi", + ), + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[ + model_monitoring_stats.ModelMonitoringStats(), + model_monitoring_stats.ModelMonitoringStats(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model_monitor", ""),)), + ) + pager = client.search_model_monitoring_stats(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, model_monitoring_stats.ModelMonitoringStats) for i in results + ) + + +def test_search_model_monitoring_stats_pages(transport_name: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual 
call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_stats), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[ + model_monitoring_stats.ModelMonitoringStats(), + model_monitoring_stats.ModelMonitoringStats(), + model_monitoring_stats.ModelMonitoringStats(), + ], + next_page_token="abc", + ), + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[], + next_page_token="def", + ), + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[ + model_monitoring_stats.ModelMonitoringStats(), + ], + next_page_token="ghi", + ), + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[ + model_monitoring_stats.ModelMonitoringStats(), + model_monitoring_stats.ModelMonitoringStats(), + ], + ), + RuntimeError, + ) + pages = list(client.search_model_monitoring_stats(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_search_model_monitoring_stats_async_pager(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_stats), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[ + model_monitoring_stats.ModelMonitoringStats(), + model_monitoring_stats.ModelMonitoringStats(), + model_monitoring_stats.ModelMonitoringStats(), + ], + next_page_token="abc", + ), + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[], + next_page_token="def", + ), + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[ + model_monitoring_stats.ModelMonitoringStats(), + ], + next_page_token="ghi", + ), + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[ + model_monitoring_stats.ModelMonitoringStats(), + model_monitoring_stats.ModelMonitoringStats(), + ], + ), + RuntimeError, + ) + async_pager = await client.search_model_monitoring_stats( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, model_monitoring_stats.ModelMonitoringStats) + for i in responses + ) + + +@pytest.mark.asyncio +async def test_search_model_monitoring_stats_async_pages(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_stats), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[ + model_monitoring_stats.ModelMonitoringStats(), + model_monitoring_stats.ModelMonitoringStats(), + model_monitoring_stats.ModelMonitoringStats(), + ], + next_page_token="abc", + ), + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[], + next_page_token="def", + ), + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[ + model_monitoring_stats.ModelMonitoringStats(), + ], + next_page_token="ghi", + ), + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[ + model_monitoring_stats.ModelMonitoringStats(), + model_monitoring_stats.ModelMonitoringStats(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.search_model_monitoring_stats(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.SearchModelMonitoringAlertsRequest, + dict, + ], +) +def test_search_model_monitoring_alerts(request_type, transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_alerts), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = ( + model_monitoring_service.SearchModelMonitoringAlertsResponse( + total_number_alerts=2038, + next_page_token="next_page_token_value", + ) + ) + response = client.search_model_monitoring_alerts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.SearchModelMonitoringAlertsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.SearchModelMonitoringAlertsPager) + assert response.total_number_alerts == 2038 + assert response.next_page_token == "next_page_token_value" + + +def test_search_model_monitoring_alerts_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_alerts), "__call__" + ) as call: + client.search_model_monitoring_alerts() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.SearchModelMonitoringAlertsRequest() + + +def test_search_model_monitoring_alerts_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = model_monitoring_service.SearchModelMonitoringAlertsRequest( + model_monitor="model_monitor_value", + model_monitoring_job="model_monitoring_job_value", + stats_name="stats_name_value", + objective_type="objective_type_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_alerts), "__call__" + ) as call: + client.search_model_monitoring_alerts(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.SearchModelMonitoringAlertsRequest( + model_monitor="model_monitor_value", + model_monitoring_job="model_monitoring_job_value", + stats_name="stats_name_value", + objective_type="objective_type_value", + page_token="page_token_value", + ) + + +@pytest.mark.asyncio +async def test_search_model_monitoring_alerts_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_alerts), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_service.SearchModelMonitoringAlertsResponse( + total_number_alerts=2038, + next_page_token="next_page_token_value", + ) + ) + response = await client.search_model_monitoring_alerts() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_monitoring_service.SearchModelMonitoringAlertsRequest() + + +@pytest.mark.asyncio +async def test_search_model_monitoring_alerts_async( + transport: str = "grpc_asyncio", + request_type=model_monitoring_service.SearchModelMonitoringAlertsRequest, +): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_alerts), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_service.SearchModelMonitoringAlertsResponse( + total_number_alerts=2038, + next_page_token="next_page_token_value", + ) + ) + response = await client.search_model_monitoring_alerts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = model_monitoring_service.SearchModelMonitoringAlertsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.SearchModelMonitoringAlertsAsyncPager) + assert response.total_number_alerts == 2038 + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_search_model_monitoring_alerts_async_from_dict(): + await test_search_model_monitoring_alerts_async(request_type=dict) + + +def test_search_model_monitoring_alerts_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_monitoring_service.SearchModelMonitoringAlertsRequest() + + request.model_monitor = "model_monitor_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_alerts), "__call__" + ) as call: + call.return_value = ( + model_monitoring_service.SearchModelMonitoringAlertsResponse() + ) + client.search_model_monitoring_alerts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model_monitor=model_monitor_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_search_model_monitoring_alerts_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_monitoring_service.SearchModelMonitoringAlertsRequest() + + request.model_monitor = "model_monitor_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.search_model_monitoring_alerts), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_service.SearchModelMonitoringAlertsResponse() + ) + await client.search_model_monitoring_alerts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "model_monitor=model_monitor_value", + ) in kw["metadata"] + + +def test_search_model_monitoring_alerts_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_alerts), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + model_monitoring_service.SearchModelMonitoringAlertsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.search_model_monitoring_alerts( + model_monitor="model_monitor_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].model_monitor + mock_val = "model_monitor_value" + assert arg == mock_val + + +def test_search_model_monitoring_alerts_flattened_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.search_model_monitoring_alerts( + model_monitoring_service.SearchModelMonitoringAlertsRequest(), + model_monitor="model_monitor_value", + ) + + +@pytest.mark.asyncio +async def test_search_model_monitoring_alerts_flattened_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_alerts), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + model_monitoring_service.SearchModelMonitoringAlertsResponse() + ) + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + model_monitoring_service.SearchModelMonitoringAlertsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.search_model_monitoring_alerts( + model_monitor="model_monitor_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].model_monitor + mock_val = "model_monitor_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_search_model_monitoring_alerts_flattened_error_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.search_model_monitoring_alerts( + model_monitoring_service.SearchModelMonitoringAlertsRequest(), + model_monitor="model_monitor_value", + ) + + +def test_search_model_monitoring_alerts_pager(transport_name: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_alerts), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[ + model_monitoring_alert.ModelMonitoringAlert(), + model_monitoring_alert.ModelMonitoringAlert(), + model_monitoring_alert.ModelMonitoringAlert(), + ], + next_page_token="abc", + ), + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[], + next_page_token="def", + ), + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[ + model_monitoring_alert.ModelMonitoringAlert(), + ], + next_page_token="ghi", + ), + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[ + model_monitoring_alert.ModelMonitoringAlert(), + model_monitoring_alert.ModelMonitoringAlert(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("model_monitor", ""),)), + ) + pager = client.search_model_monitoring_alerts(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, model_monitoring_alert.ModelMonitoringAlert) for i in results + ) + + +def test_search_model_monitoring_alerts_pages(transport_name: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + 
transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_alerts), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[ + model_monitoring_alert.ModelMonitoringAlert(), + model_monitoring_alert.ModelMonitoringAlert(), + model_monitoring_alert.ModelMonitoringAlert(), + ], + next_page_token="abc", + ), + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[], + next_page_token="def", + ), + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[ + model_monitoring_alert.ModelMonitoringAlert(), + ], + next_page_token="ghi", + ), + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[ + model_monitoring_alert.ModelMonitoringAlert(), + model_monitoring_alert.ModelMonitoringAlert(), + ], + ), + RuntimeError, + ) + pages = list(client.search_model_monitoring_alerts(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_search_model_monitoring_alerts_async_pager(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_alerts), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[ + model_monitoring_alert.ModelMonitoringAlert(), + model_monitoring_alert.ModelMonitoringAlert(), + model_monitoring_alert.ModelMonitoringAlert(), + ], + next_page_token="abc", + ), + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[], + next_page_token="def", + ), + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[ + model_monitoring_alert.ModelMonitoringAlert(), + ], + next_page_token="ghi", + ), + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[ + model_monitoring_alert.ModelMonitoringAlert(), + model_monitoring_alert.ModelMonitoringAlert(), + ], + ), + RuntimeError, + ) + async_pager = await client.search_model_monitoring_alerts( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, model_monitoring_alert.ModelMonitoringAlert) + for i in responses + ) + + +@pytest.mark.asyncio +async def test_search_model_monitoring_alerts_async_pages(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_model_monitoring_alerts), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[ + model_monitoring_alert.ModelMonitoringAlert(), + model_monitoring_alert.ModelMonitoringAlert(), + model_monitoring_alert.ModelMonitoringAlert(), + ], + next_page_token="abc", + ), + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[], + next_page_token="def", + ), + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[ + model_monitoring_alert.ModelMonitoringAlert(), + ], + next_page_token="ghi", + ), + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[ + model_monitoring_alert.ModelMonitoringAlert(), + model_monitoring_alert.ModelMonitoringAlert(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.search_model_monitoring_alerts(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.CreateModelMonitorRequest, + dict, + ], +) +def test_create_model_monitor_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request_init["model_monitor"] = { + "tabular_objective": { + "feature_drift_spec": { + "features": ["features_value1", "features_value2"], + "categorical_metric_type": "categorical_metric_type_value", + "numeric_metric_type": "numeric_metric_type_value", + "default_categorical_alert_condition": {"threshold": 0.973}, + 
"default_numeric_alert_condition": {}, + "feature_alert_conditions": {}, + }, + "prediction_output_drift_spec": {}, + "feature_attribution_spec": { + "features": ["features_value1", "features_value2"], + "default_alert_condition": {}, + "feature_alert_conditions": {}, + "batch_explanation_dedicated_resources": { + "machine_spec": { + "machine_type": "machine_type_value", + "accelerator_type": 1, + "accelerator_count": 1805, + "tpu_topology": "tpu_topology_value", + }, + "starting_replica_count": 2355, + "max_replica_count": 1805, + }, + }, + }, + "name": "name_value", + "display_name": "display_name_value", + "model_monitoring_target": { + "vertex_model": { + "model": "model_value", + "model_version_id": "model_version_id_value", + } + }, + "training_dataset": { + "columnized_dataset": { + "vertex_dataset": "vertex_dataset_value", + "gcs_source": {"gcs_uri": "gcs_uri_value", "format_": 1}, + "bigquery_source": { + "table_uri": "table_uri_value", + "query": "query_value", + }, + "timestamp_field": "timestamp_field_value", + }, + "batch_prediction_output": { + "batch_prediction_job": "batch_prediction_job_value" + }, + "vertex_endpoint_logs": { + "endpoints": ["endpoints_value1", "endpoints_value2"] + }, + "time_interval": { + "start_time": {"seconds": 751, "nanos": 543}, + "end_time": {}, + }, + "time_offset": {"offset": "offset_value", "window": "window_value"}, + }, + "notification_spec": { + "email_config": { + "user_emails": ["user_emails_value1", "user_emails_value2"] + }, + "enable_cloud_logging": True, + "notification_channel_configs": [ + {"notification_channel": "notification_channel_value"} + ], + }, + "output_spec": { + "gcs_base_directory": {"output_uri_prefix": "output_uri_prefix_value"} + }, + "explanation_spec": { + "parameters": { + "sampled_shapley_attribution": {"path_count": 1077}, + "integrated_gradients_attribution": { + "step_count": 1092, + "smooth_grad_config": { + "noise_sigma": 0.11660000000000001, + "feature_noise_sigma": { + 
"noise_sigma": [{"name": "name_value", "sigma": 0.529}] + }, + "noisy_sample_count": 1947, + }, + "blur_baseline_config": {"max_blur_sigma": 0.1482}, + }, + "xrai_attribution": { + "step_count": 1092, + "smooth_grad_config": {}, + "blur_baseline_config": {}, + }, + "examples": { + "example_gcs_source": { + "data_format": 1, + "gcs_source": {"uris": ["uris_value1", "uris_value2"]}, + }, + "nearest_neighbor_search_config": { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "struct_value": {"fields": {}}, + "list_value": {"values": {}}, + }, + "presets": {"query": 1, "modality": 1}, + "gcs_source": {}, + "neighbor_count": 1494, + }, + "top_k": 541, + "output_indices": {}, + }, + "metadata": { + "inputs": {}, + "outputs": {}, + "feature_attributions_schema_uri": "feature_attributions_schema_uri_value", + "latent_space_source": "latent_space_source_value", + }, + }, + "model_monitoring_schema": { + "feature_fields": [ + {"name": "name_value", "data_type": "data_type_value", "repeated": True} + ], + "prediction_fields": {}, + "ground_truth_fields": {}, + }, + "create_time": {}, + "update_time": {}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = model_monitoring_service.CreateModelMonitorRequest.meta.fields[ + "model_monitor" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["model_monitor"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["model_monitor"][field])): + del request_init["model_monitor"][field][i][subfield] + else: + del 
request_init["model_monitor"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_model_monitor(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_create_model_monitor_rest_required_fields( + request_type=model_monitoring_service.CreateModelMonitorRequest, +): + transport_class = transports.ModelMonitoringServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_model_monitor._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_model_monitor._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set(("model_monitor_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_model_monitor(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_model_monitor_rest_unset_required_fields(): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_model_monitor._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("modelMonitorId",)) + & set( + ( + "parent", + "modelMonitor", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_model_monitor_rest_interceptors(null_interceptor): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelMonitoringServiceRestInterceptor(), + ) + client = ModelMonitoringServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, "post_create_model_monitor" + ) as post, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, "pre_create_model_monitor" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = model_monitoring_service.CreateModelMonitorRequest.pb( + 
model_monitoring_service.CreateModelMonitorRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = model_monitoring_service.CreateModelMonitorRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_model_monitor( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_model_monitor_rest_bad_request( + transport: str = "rest", + request_type=model_monitoring_service.CreateModelMonitorRequest, +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_model_monitor(request) + + +def test_create_model_monitor_rest_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + model_monitor=gca_model_monitor.ModelMonitor( + tabular_objective=model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective( + feature_drift_spec=model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec( + features=["features_value"] + ) + ) + ), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_model_monitor(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{parent=projects/*/locations/*}/modelMonitors" + % client.transport._host, + args[1], + ) + + +def test_create_model_monitor_rest_flattened_error(transport: str = "rest"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_model_monitor( + model_monitoring_service.CreateModelMonitorRequest(), + parent="parent_value", + model_monitor=gca_model_monitor.ModelMonitor( + tabular_objective=model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective( + feature_drift_spec=model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec( + features=["features_value"] + ) + ) + ), + ) + + +def test_create_model_monitor_rest_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.UpdateModelMonitorRequest, + dict, + ], +) +def test_update_model_monitor_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "model_monitor": { + "name": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + } + request_init["model_monitor"] = { + "tabular_objective": { + "feature_drift_spec": { + "features": ["features_value1", "features_value2"], + "categorical_metric_type": "categorical_metric_type_value", + "numeric_metric_type": "numeric_metric_type_value", + "default_categorical_alert_condition": {"threshold": 0.973}, + "default_numeric_alert_condition": {}, + "feature_alert_conditions": {}, + }, + "prediction_output_drift_spec": {}, + "feature_attribution_spec": { + "features": ["features_value1", "features_value2"], + "default_alert_condition": {}, + "feature_alert_conditions": {}, + "batch_explanation_dedicated_resources": { + "machine_spec": { + "machine_type": "machine_type_value", + "accelerator_type": 1, + "accelerator_count": 1805, + "tpu_topology": "tpu_topology_value", + }, + "starting_replica_count": 2355, + "max_replica_count": 1805, + }, + }, + }, + "name": "projects/sample1/locations/sample2/modelMonitors/sample3", + 
"display_name": "display_name_value", + "model_monitoring_target": { + "vertex_model": { + "model": "model_value", + "model_version_id": "model_version_id_value", + } + }, + "training_dataset": { + "columnized_dataset": { + "vertex_dataset": "vertex_dataset_value", + "gcs_source": {"gcs_uri": "gcs_uri_value", "format_": 1}, + "bigquery_source": { + "table_uri": "table_uri_value", + "query": "query_value", + }, + "timestamp_field": "timestamp_field_value", + }, + "batch_prediction_output": { + "batch_prediction_job": "batch_prediction_job_value" + }, + "vertex_endpoint_logs": { + "endpoints": ["endpoints_value1", "endpoints_value2"] + }, + "time_interval": { + "start_time": {"seconds": 751, "nanos": 543}, + "end_time": {}, + }, + "time_offset": {"offset": "offset_value", "window": "window_value"}, + }, + "notification_spec": { + "email_config": { + "user_emails": ["user_emails_value1", "user_emails_value2"] + }, + "enable_cloud_logging": True, + "notification_channel_configs": [ + {"notification_channel": "notification_channel_value"} + ], + }, + "output_spec": { + "gcs_base_directory": {"output_uri_prefix": "output_uri_prefix_value"} + }, + "explanation_spec": { + "parameters": { + "sampled_shapley_attribution": {"path_count": 1077}, + "integrated_gradients_attribution": { + "step_count": 1092, + "smooth_grad_config": { + "noise_sigma": 0.11660000000000001, + "feature_noise_sigma": { + "noise_sigma": [{"name": "name_value", "sigma": 0.529}] + }, + "noisy_sample_count": 1947, + }, + "blur_baseline_config": {"max_blur_sigma": 0.1482}, + }, + "xrai_attribution": { + "step_count": 1092, + "smooth_grad_config": {}, + "blur_baseline_config": {}, + }, + "examples": { + "example_gcs_source": { + "data_format": 1, + "gcs_source": {"uris": ["uris_value1", "uris_value2"]}, + }, + "nearest_neighbor_search_config": { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "struct_value": {"fields": {}}, + "list_value": 
{"values": {}}, + }, + "presets": {"query": 1, "modality": 1}, + "gcs_source": {}, + "neighbor_count": 1494, + }, + "top_k": 541, + "output_indices": {}, + }, + "metadata": { + "inputs": {}, + "outputs": {}, + "feature_attributions_schema_uri": "feature_attributions_schema_uri_value", + "latent_space_source": "latent_space_source_value", + }, + }, + "model_monitoring_schema": { + "feature_fields": [ + {"name": "name_value", "data_type": "data_type_value", "repeated": True} + ], + "prediction_fields": {}, + "ground_truth_fields": {}, + }, + "create_time": {}, + "update_time": {}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = model_monitoring_service.UpdateModelMonitorRequest.meta.fields[ + "model_monitor" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["model_monitor"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["model_monitor"][field])): + del request_init["model_monitor"][field][i][subfield] + else: + del 
request_init["model_monitor"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_model_monitor(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_update_model_monitor_rest_required_fields( + request_type=model_monitoring_service.UpdateModelMonitorRequest, +): + transport_class = transports.ModelMonitoringServiceRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_model_monitor._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_model_monitor._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.update_model_monitor(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_model_monitor_rest_unset_required_fields(): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_model_monitor._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "modelMonitor", + "updateMask", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, 
False]) +def test_update_model_monitor_rest_interceptors(null_interceptor): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelMonitoringServiceRestInterceptor(), + ) + client = ModelMonitoringServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, "post_update_model_monitor" + ) as post, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, "pre_update_model_monitor" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = model_monitoring_service.UpdateModelMonitorRequest.pb( + model_monitoring_service.UpdateModelMonitorRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = model_monitoring_service.UpdateModelMonitorRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.update_model_monitor( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_model_monitor_rest_bad_request( + transport: str = "rest", + request_type=model_monitoring_service.UpdateModelMonitorRequest, +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request 
that will satisfy transcoding + request_init = { + "model_monitor": { + "name": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_model_monitor(request) + + +def test_update_model_monitor_rest_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "model_monitor": { + "name": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + } + + # get truthy value for each flattened field + mock_args = dict( + model_monitor=gca_model_monitor.ModelMonitor( + tabular_objective=model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective( + feature_drift_spec=model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec( + features=["features_value"] + ) + ) + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.update_model_monitor(**mock_args) + + # Establish that 
the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{model_monitor.name=projects/*/locations/*/modelMonitors/*}" + % client.transport._host, + args[1], + ) + + +def test_update_model_monitor_rest_flattened_error(transport: str = "rest"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_model_monitor( + model_monitoring_service.UpdateModelMonitorRequest(), + model_monitor=gca_model_monitor.ModelMonitor( + tabular_objective=model_monitoring_spec.ModelMonitoringObjectiveSpec.TabularObjective( + feature_drift_spec=model_monitoring_spec.ModelMonitoringObjectiveSpec.DataDriftSpec( + features=["features_value"] + ) + ) + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_update_model_monitor_rest_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.GetModelMonitorRequest, + dict, + ], +) +def test_get_model_monitor_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/modelMonitors/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = model_monitor.ModelMonitor( + name="name_value", + display_name="display_name_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_monitor.ModelMonitor.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_model_monitor(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, model_monitor.ModelMonitor) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + + +def test_get_model_monitor_rest_required_fields( + request_type=model_monitoring_service.GetModelMonitorRequest, +): + transport_class = transports.ModelMonitoringServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_model_monitor._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_model_monitor._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + 
transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = model_monitor.ModelMonitor() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = model_monitor.ModelMonitor.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_model_monitor(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_model_monitor_rest_unset_required_fields(): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_model_monitor._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_model_monitor_rest_interceptors(null_interceptor): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else 
transports.ModelMonitoringServiceRestInterceptor(), + ) + client = ModelMonitoringServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, "post_get_model_monitor" + ) as post, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, "pre_get_model_monitor" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = model_monitoring_service.GetModelMonitorRequest.pb( + model_monitoring_service.GetModelMonitorRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = model_monitor.ModelMonitor.to_json( + model_monitor.ModelMonitor() + ) + + request = model_monitoring_service.GetModelMonitorRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = model_monitor.ModelMonitor() + + client.get_model_monitor( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_model_monitor_rest_bad_request( + transport: str = "rest", + request_type=model_monitoring_service.GetModelMonitorRequest, +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/modelMonitors/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_model_monitor(request) + + +def test_get_model_monitor_rest_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = model_monitor.ModelMonitor() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_monitor.ModelMonitor.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_model_monitor(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/modelMonitors/*}" + % client.transport._host, + args[1], + ) + + +def test_get_model_monitor_rest_flattened_error(transport: str = "rest"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model_monitor( + model_monitoring_service.GetModelMonitorRequest(), + name="name_value", + ) + + +def test_get_model_monitor_rest_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.ListModelMonitorsRequest, + dict, + ], +) +def test_list_model_monitors_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = model_monitoring_service.ListModelMonitorsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_monitoring_service.ListModelMonitorsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_model_monitors(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelMonitorsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_model_monitors_rest_required_fields( + request_type=model_monitoring_service.ListModelMonitorsRequest, +): + transport_class = transports.ModelMonitoringServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_model_monitors._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_model_monitors._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "filter", + "page_size", + "page_token", + "read_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = model_monitoring_service.ListModelMonitorsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = model_monitoring_service.ListModelMonitorsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_model_monitors(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_model_monitors_rest_unset_required_fields(): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_model_monitors._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "pageSize", + "pageToken", + "readMask", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_model_monitors_rest_interceptors(null_interceptor): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelMonitoringServiceRestInterceptor(), + ) + client = ModelMonitoringServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, "post_list_model_monitors" + ) as post, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, "pre_list_model_monitors" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = 
model_monitoring_service.ListModelMonitorsRequest.pb( + model_monitoring_service.ListModelMonitorsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + model_monitoring_service.ListModelMonitorsResponse.to_json( + model_monitoring_service.ListModelMonitorsResponse() + ) + ) + + request = model_monitoring_service.ListModelMonitorsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = model_monitoring_service.ListModelMonitorsResponse() + + client.list_model_monitors( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_model_monitors_rest_bad_request( + transport: str = "rest", + request_type=model_monitoring_service.ListModelMonitorsRequest, +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_model_monitors(request) + + +def test_list_model_monitors_rest_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = model_monitoring_service.ListModelMonitorsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_monitoring_service.ListModelMonitorsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_model_monitors(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{parent=projects/*/locations/*}/modelMonitors" + % client.transport._host, + args[1], + ) + + +def test_list_model_monitors_rest_flattened_error(transport: str = "rest"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_model_monitors( + model_monitoring_service.ListModelMonitorsRequest(), + parent="parent_value", + ) + + +def test_list_model_monitors_rest_pager(transport: str = "rest"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[ + model_monitor.ModelMonitor(), + model_monitor.ModelMonitor(), + model_monitor.ModelMonitor(), + ], + next_page_token="abc", + ), + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[], + next_page_token="def", + ), + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[ + model_monitor.ModelMonitor(), + ], + next_page_token="ghi", + ), + model_monitoring_service.ListModelMonitorsResponse( + model_monitors=[ + model_monitor.ModelMonitor(), + model_monitor.ModelMonitor(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + model_monitoring_service.ListModelMonitorsResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_model_monitors(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, model_monitor.ModelMonitor) for i in results) + + pages = list(client.list_model_monitors(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.DeleteModelMonitorRequest, + dict, + ], +) +def test_delete_model_monitor_rest(request_type): + client = ModelMonitoringServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/modelMonitors/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_model_monitor(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_delete_model_monitor_rest_required_fields( + request_type=model_monitoring_service.DeleteModelMonitorRequest, +): + transport_class = transports.ModelMonitoringServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_model_monitor._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_model_monitor._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set(("force",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_model_monitor(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_model_monitor_rest_unset_required_fields(): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_model_monitor._get_unset_required_fields({}) + assert set(unset_fields) == (set(("force",)) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_model_monitor_rest_interceptors(null_interceptor): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelMonitoringServiceRestInterceptor(), + ) + client = ModelMonitoringServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, "post_delete_model_monitor" + ) as post, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, "pre_delete_model_monitor" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = model_monitoring_service.DeleteModelMonitorRequest.pb( + model_monitoring_service.DeleteModelMonitorRequest() + ) + transcode.return_value = { + "method": 
"post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = model_monitoring_service.DeleteModelMonitorRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.delete_model_monitor( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_model_monitor_rest_bad_request( + transport: str = "rest", + request_type=model_monitoring_service.DeleteModelMonitorRequest, +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/modelMonitors/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_model_monitor(request) + + +def test_delete_model_monitor_rest_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_model_monitor(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/modelMonitors/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_model_monitor_rest_flattened_error(transport: str = "rest"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_model_monitor( + model_monitoring_service.DeleteModelMonitorRequest(), + name="name_value", + ) + + +def test_delete_model_monitor_rest_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.CreateModelMonitoringJobRequest, + dict, + ], +) +def test_create_model_monitoring_job_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "parent": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + request_init["model_monitoring_job"] = { + "name": "name_value", + "display_name": "display_name_value", + "model_monitoring_spec": { + "objective_spec": { + "tabular_objective": { + "feature_drift_spec": { + "features": ["features_value1", "features_value2"], + "categorical_metric_type": "categorical_metric_type_value", + "numeric_metric_type": "numeric_metric_type_value", + "default_categorical_alert_condition": {"threshold": 0.973}, + "default_numeric_alert_condition": {}, + "feature_alert_conditions": {}, + }, + "prediction_output_drift_spec": {}, + "feature_attribution_spec": { + "features": ["features_value1", "features_value2"], + "default_alert_condition": {}, + "feature_alert_conditions": {}, + "batch_explanation_dedicated_resources": { + "machine_spec": { + "machine_type": "machine_type_value", + "accelerator_type": 1, + "accelerator_count": 1805, + "tpu_topology": "tpu_topology_value", + }, + "starting_replica_count": 2355, + "max_replica_count": 1805, + }, + }, + }, + "explanation_spec": { + "parameters": { + "sampled_shapley_attribution": {"path_count": 1077}, + "integrated_gradients_attribution": { + "step_count": 1092, + "smooth_grad_config": { + "noise_sigma": 0.11660000000000001, + 
"feature_noise_sigma": { + "noise_sigma": [ + {"name": "name_value", "sigma": 0.529} + ] + }, + "noisy_sample_count": 1947, + }, + "blur_baseline_config": {"max_blur_sigma": 0.1482}, + }, + "xrai_attribution": { + "step_count": 1092, + "smooth_grad_config": {}, + "blur_baseline_config": {}, + }, + "examples": { + "example_gcs_source": { + "data_format": 1, + "gcs_source": {"uris": ["uris_value1", "uris_value2"]}, + }, + "nearest_neighbor_search_config": { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "struct_value": {"fields": {}}, + "list_value": {"values": {}}, + }, + "presets": {"query": 1, "modality": 1}, + "gcs_source": {}, + "neighbor_count": 1494, + }, + "top_k": 541, + "output_indices": {}, + }, + "metadata": { + "inputs": {}, + "outputs": {}, + "feature_attributions_schema_uri": "feature_attributions_schema_uri_value", + "latent_space_source": "latent_space_source_value", + }, + }, + "baseline_dataset": { + "columnized_dataset": { + "vertex_dataset": "vertex_dataset_value", + "gcs_source": {"gcs_uri": "gcs_uri_value", "format_": 1}, + "bigquery_source": { + "table_uri": "table_uri_value", + "query": "query_value", + }, + "timestamp_field": "timestamp_field_value", + }, + "batch_prediction_output": { + "batch_prediction_job": "batch_prediction_job_value" + }, + "vertex_endpoint_logs": { + "endpoints": ["endpoints_value1", "endpoints_value2"] + }, + "time_interval": { + "start_time": {"seconds": 751, "nanos": 543}, + "end_time": {}, + }, + "time_offset": {"offset": "offset_value", "window": "window_value"}, + }, + "target_dataset": {}, + }, + "notification_spec": { + "email_config": { + "user_emails": ["user_emails_value1", "user_emails_value2"] + }, + "enable_cloud_logging": True, + "notification_channel_configs": [ + {"notification_channel": "notification_channel_value"} + ], + }, + "output_spec": { + "gcs_base_directory": {"output_uri_prefix": "output_uri_prefix_value"} + }, + }, + 
"create_time": {}, + "update_time": {}, + "state": 1, + "schedule": "schedule_value", + "job_execution_detail": { + "baseline_datasets": [{"location": "location_value", "time_range": {}}], + "target_datasets": {}, + "objective_status": {}, + "error": { + "code": 411, + "message": "message_value", + "details": [ + { + "type_url": "type.googleapis.com/google.protobuf.Duration", + "value": b"\x08\x0c\x10\xdb\x07", + } + ], + }, + }, + "schedule_time": {}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = model_monitoring_service.CreateModelMonitoringJobRequest.meta.fields[ + "model_monitoring_job" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init[ + "model_monitoring_job" + ].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["model_monitoring_job"][field])): + del 
request_init["model_monitoring_job"][field][i][subfield] + else: + del request_init["model_monitoring_job"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gca_model_monitoring_job.ModelMonitoringJob( + name="name_value", + display_name="display_name_value", + state=job_state.JobState.JOB_STATE_QUEUED, + schedule="schedule_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_model_monitoring_job.ModelMonitoringJob.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_model_monitoring_job(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_model_monitoring_job.ModelMonitoringJob) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.schedule == "schedule_value" + + +def test_create_model_monitoring_job_rest_required_fields( + request_type=model_monitoring_service.CreateModelMonitoringJobRequest, +): + transport_class = transports.ModelMonitoringServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_model_monitoring_job._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_model_monitoring_job._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("model_monitoring_job_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = gca_model_monitoring_job.ModelMonitoringJob() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = gca_model_monitoring_job.ModelMonitoringJob.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_model_monitoring_job(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_model_monitoring_job_rest_unset_required_fields(): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_model_monitoring_job._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("modelMonitoringJobId",)) + & set( + ( + "parent", + "modelMonitoringJob", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_model_monitoring_job_rest_interceptors(null_interceptor): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelMonitoringServiceRestInterceptor(), + ) + client = ModelMonitoringServiceClient(transport=transport) + with 
mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, + "post_create_model_monitoring_job", + ) as post, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, + "pre_create_model_monitoring_job", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = model_monitoring_service.CreateModelMonitoringJobRequest.pb( + model_monitoring_service.CreateModelMonitoringJobRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = gca_model_monitoring_job.ModelMonitoringJob.to_json( + gca_model_monitoring_job.ModelMonitoringJob() + ) + + request = model_monitoring_service.CreateModelMonitoringJobRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = gca_model_monitoring_job.ModelMonitoringJob() + + client.create_model_monitoring_job( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_model_monitoring_job_rest_bad_request( + transport: str = "rest", + request_type=model_monitoring_service.CreateModelMonitoringJobRequest, +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "parent": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_model_monitoring_job(request) + + +def test_create_model_monitoring_job_rest_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gca_model_monitoring_job.ModelMonitoringJob() + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + model_monitoring_job=gca_model_monitoring_job.ModelMonitoringJob( + name="name_value" + ), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_model_monitoring_job.ModelMonitoringJob.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_model_monitoring_job(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{parent=projects/*/locations/*/modelMonitors/*}/modelMonitoringJobs" + % client.transport._host, + args[1], + ) + + +def test_create_model_monitoring_job_rest_flattened_error(transport: str = "rest"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_model_monitoring_job( + model_monitoring_service.CreateModelMonitoringJobRequest(), + parent="parent_value", + model_monitoring_job=gca_model_monitoring_job.ModelMonitoringJob( + name="name_value" + ), + ) + + +def test_create_model_monitoring_job_rest_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.GetModelMonitoringJobRequest, + dict, + ], +) +def test_get_model_monitoring_job_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/modelMonitors/sample3/modelMonitoringJobs/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = model_monitoring_job.ModelMonitoringJob( + name="name_value", + display_name="display_name_value", + state=job_state.JobState.JOB_STATE_QUEUED, + schedule="schedule_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_monitoring_job.ModelMonitoringJob.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_model_monitoring_job(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, model_monitoring_job.ModelMonitoringJob) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.schedule == "schedule_value" + + +def test_get_model_monitoring_job_rest_required_fields( + request_type=model_monitoring_service.GetModelMonitoringJobRequest, +): + transport_class = transports.ModelMonitoringServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_model_monitoring_job._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_model_monitoring_job._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify 
required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = model_monitoring_job.ModelMonitoringJob() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = model_monitoring_job.ModelMonitoringJob.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_model_monitoring_job(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_model_monitoring_job_rest_unset_required_fields(): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_model_monitoring_job._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", 
[True, False]) +def test_get_model_monitoring_job_rest_interceptors(null_interceptor): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelMonitoringServiceRestInterceptor(), + ) + client = ModelMonitoringServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, + "post_get_model_monitoring_job", + ) as post, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, "pre_get_model_monitoring_job" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = model_monitoring_service.GetModelMonitoringJobRequest.pb( + model_monitoring_service.GetModelMonitoringJobRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = model_monitoring_job.ModelMonitoringJob.to_json( + model_monitoring_job.ModelMonitoringJob() + ) + + request = model_monitoring_service.GetModelMonitoringJobRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = model_monitoring_job.ModelMonitoringJob() + + client.get_model_monitoring_job( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_model_monitoring_job_rest_bad_request( + transport: str = "rest", + request_type=model_monitoring_service.GetModelMonitoringJobRequest, +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # 
send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/modelMonitors/sample3/modelMonitoringJobs/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_model_monitoring_job(request) + + +def test_get_model_monitoring_job_rest_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = model_monitoring_job.ModelMonitoringJob() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/modelMonitors/sample3/modelMonitoringJobs/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_monitoring_job.ModelMonitoringJob.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_model_monitoring_job(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/modelMonitors/*/modelMonitoringJobs/*}" + % client.transport._host, + args[1], + ) + + +def test_get_model_monitoring_job_rest_flattened_error(transport: str = "rest"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model_monitoring_job( + model_monitoring_service.GetModelMonitoringJobRequest(), + name="name_value", + ) + + +def test_get_model_monitoring_job_rest_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.ListModelMonitoringJobsRequest, + dict, + ], +) +def test_list_model_monitoring_jobs_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "parent": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = model_monitoring_service.ListModelMonitoringJobsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_monitoring_service.ListModelMonitoringJobsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_model_monitoring_jobs(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelMonitoringJobsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_model_monitoring_jobs_rest_required_fields( + request_type=model_monitoring_service.ListModelMonitoringJobsRequest, +): + transport_class = transports.ModelMonitoringServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_model_monitoring_jobs._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_model_monitoring_jobs._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "filter", + "page_size", + "page_token", + "read_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = model_monitoring_service.ListModelMonitoringJobsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = model_monitoring_service.ListModelMonitoringJobsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_model_monitoring_jobs(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_model_monitoring_jobs_rest_unset_required_fields(): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_model_monitoring_jobs._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "pageSize", + "pageToken", + "readMask", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_model_monitoring_jobs_rest_interceptors(null_interceptor): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelMonitoringServiceRestInterceptor(), + ) + client = ModelMonitoringServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, + "post_list_model_monitoring_jobs", + ) as post, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, + "pre_list_model_monitoring_jobs", + ) as pre: + pre.assert_not_called() + post.assert_not_called() 
+ pb_message = model_monitoring_service.ListModelMonitoringJobsRequest.pb( + model_monitoring_service.ListModelMonitoringJobsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + model_monitoring_service.ListModelMonitoringJobsResponse.to_json( + model_monitoring_service.ListModelMonitoringJobsResponse() + ) + ) + + request = model_monitoring_service.ListModelMonitoringJobsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = model_monitoring_service.ListModelMonitoringJobsResponse() + + client.list_model_monitoring_jobs( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_model_monitoring_jobs_rest_bad_request( + transport: str = "rest", + request_type=model_monitoring_service.ListModelMonitoringJobsRequest, +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "parent": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_model_monitoring_jobs(request) + + +def test_list_model_monitoring_jobs_rest_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = model_monitoring_service.ListModelMonitoringJobsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_monitoring_service.ListModelMonitoringJobsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_model_monitoring_jobs(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{parent=projects/*/locations/*/modelMonitors/*}/modelMonitoringJobs" + % client.transport._host, + args[1], + ) + + +def test_list_model_monitoring_jobs_rest_flattened_error(transport: str = "rest"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_model_monitoring_jobs( + model_monitoring_service.ListModelMonitoringJobsRequest(), + parent="parent_value", + ) + + +def test_list_model_monitoring_jobs_rest_pager(transport: str = "rest"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[ + model_monitoring_job.ModelMonitoringJob(), + model_monitoring_job.ModelMonitoringJob(), + model_monitoring_job.ModelMonitoringJob(), + ], + next_page_token="abc", + ), + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[], + next_page_token="def", + ), + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[ + model_monitoring_job.ModelMonitoringJob(), + ], + next_page_token="ghi", + ), + model_monitoring_service.ListModelMonitoringJobsResponse( + model_monitoring_jobs=[ + model_monitoring_job.ModelMonitoringJob(), + model_monitoring_job.ModelMonitoringJob(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + model_monitoring_service.ListModelMonitoringJobsResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "parent": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + + pager = client.list_model_monitoring_jobs(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, model_monitoring_job.ModelMonitoringJob) for i in results + ) + + pages = list(client.list_model_monitoring_jobs(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.DeleteModelMonitoringJobRequest, + dict, + ], +) +def 
test_delete_model_monitoring_job_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/modelMonitors/sample3/modelMonitoringJobs/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_model_monitoring_job(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_delete_model_monitoring_job_rest_required_fields( + request_type=model_monitoring_service.DeleteModelMonitoringJobRequest, +): + transport_class = transports.ModelMonitoringServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_model_monitoring_job._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_model_monitoring_job._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_model_monitoring_job(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_model_monitoring_job_rest_unset_required_fields(): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_model_monitoring_job._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_model_monitoring_job_rest_interceptors(null_interceptor): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelMonitoringServiceRestInterceptor(), + ) + client = ModelMonitoringServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, + "post_delete_model_monitoring_job", + ) as post, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, + "pre_delete_model_monitoring_job", + ) as pre: + 
pre.assert_not_called() + post.assert_not_called() + pb_message = model_monitoring_service.DeleteModelMonitoringJobRequest.pb( + model_monitoring_service.DeleteModelMonitoringJobRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = model_monitoring_service.DeleteModelMonitoringJobRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.delete_model_monitoring_job( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_model_monitoring_job_rest_bad_request( + transport: str = "rest", + request_type=model_monitoring_service.DeleteModelMonitoringJobRequest, +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/modelMonitors/sample3/modelMonitoringJobs/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_model_monitoring_job(request) + + +def test_delete_model_monitoring_job_rest_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/modelMonitors/sample3/modelMonitoringJobs/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_model_monitoring_job(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/modelMonitors/*/modelMonitoringJobs/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_model_monitoring_job_rest_flattened_error(transport: str = "rest"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_model_monitoring_job( + model_monitoring_service.DeleteModelMonitoringJobRequest(), + name="name_value", + ) + + +def test_delete_model_monitoring_job_rest_error(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.SearchModelMonitoringStatsRequest, + dict, + ], +) +def test_search_model_monitoring_stats_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "model_monitor": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = model_monitoring_service.SearchModelMonitoringStatsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_monitoring_service.SearchModelMonitoringStatsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.search_model_monitoring_stats(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.SearchModelMonitoringStatsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_search_model_monitoring_stats_rest_required_fields( + request_type=model_monitoring_service.SearchModelMonitoringStatsRequest, +): + transport_class = transports.ModelMonitoringServiceRestTransport + + request_init = {} + request_init["model_monitor"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).search_model_monitoring_stats._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["modelMonitor"] = "model_monitor_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).search_model_monitoring_stats._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "modelMonitor" in jsonified_request + assert 
jsonified_request["modelMonitor"] == "model_monitor_value" + + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = model_monitoring_service.SearchModelMonitoringStatsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = ( + model_monitoring_service.SearchModelMonitoringStatsResponse.pb( + return_value + ) + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.search_model_monitoring_stats(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_search_model_monitoring_stats_rest_unset_required_fields(): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.search_model_monitoring_stats._get_unset_required_fields( + {} + ) + assert set(unset_fields) == (set(()) & set(("modelMonitor",))) + + 
+@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_search_model_monitoring_stats_rest_interceptors(null_interceptor): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelMonitoringServiceRestInterceptor(), + ) + client = ModelMonitoringServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, + "post_search_model_monitoring_stats", + ) as post, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, + "pre_search_model_monitoring_stats", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = model_monitoring_service.SearchModelMonitoringStatsRequest.pb( + model_monitoring_service.SearchModelMonitoringStatsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + model_monitoring_service.SearchModelMonitoringStatsResponse.to_json( + model_monitoring_service.SearchModelMonitoringStatsResponse() + ) + ) + + request = model_monitoring_service.SearchModelMonitoringStatsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = ( + model_monitoring_service.SearchModelMonitoringStatsResponse() + ) + + client.search_model_monitoring_stats( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_search_model_monitoring_stats_rest_bad_request( + transport: str = "rest", + 
request_type=model_monitoring_service.SearchModelMonitoringStatsRequest, +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "model_monitor": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.search_model_monitoring_stats(request) + + +def test_search_model_monitoring_stats_rest_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = model_monitoring_service.SearchModelMonitoringStatsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "model_monitor": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + model_monitor="model_monitor_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_monitoring_service.SearchModelMonitoringStatsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.search_model_monitoring_stats(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{model_monitor=projects/*/locations/*/modelMonitors/*}:searchModelMonitoringStats" + % client.transport._host, + args[1], + ) + + +def test_search_model_monitoring_stats_rest_flattened_error(transport: str = "rest"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.search_model_monitoring_stats( + model_monitoring_service.SearchModelMonitoringStatsRequest(), + model_monitor="model_monitor_value", + ) + + +def test_search_model_monitoring_stats_rest_pager(transport: str = "rest"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[ + model_monitoring_stats.ModelMonitoringStats(), + model_monitoring_stats.ModelMonitoringStats(), + model_monitoring_stats.ModelMonitoringStats(), + ], + next_page_token="abc", + ), + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[], + next_page_token="def", + ), + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[ + model_monitoring_stats.ModelMonitoringStats(), + ], + next_page_token="ghi", + ), + model_monitoring_service.SearchModelMonitoringStatsResponse( + monitoring_stats=[ + model_monitoring_stats.ModelMonitoringStats(), + model_monitoring_stats.ModelMonitoringStats(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + model_monitoring_service.SearchModelMonitoringStatsResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "model_monitor": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + + pager = client.search_model_monitoring_stats(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, model_monitoring_stats.ModelMonitoringStats) for i in results + ) + + pages = list(client.search_model_monitoring_stats(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + 
+@pytest.mark.parametrize( + "request_type", + [ + model_monitoring_service.SearchModelMonitoringAlertsRequest, + dict, + ], +) +def test_search_model_monitoring_alerts_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "model_monitor": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = model_monitoring_service.SearchModelMonitoringAlertsResponse( + total_number_alerts=2038, + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_monitoring_service.SearchModelMonitoringAlertsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.search_model_monitoring_alerts(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.SearchModelMonitoringAlertsPager) + assert response.total_number_alerts == 2038 + assert response.next_page_token == "next_page_token_value" + + +def test_search_model_monitoring_alerts_rest_required_fields( + request_type=model_monitoring_service.SearchModelMonitoringAlertsRequest, +): + transport_class = transports.ModelMonitoringServiceRestTransport + + request_init = {} + request_init["model_monitor"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).search_model_monitoring_alerts._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["modelMonitor"] = "model_monitor_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).search_model_monitoring_alerts._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "modelMonitor" in jsonified_request + assert jsonified_request["modelMonitor"] == "model_monitor_value" + + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = model_monitoring_service.SearchModelMonitoringAlertsResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = ( + model_monitoring_service.SearchModelMonitoringAlertsResponse.pb( + return_value + ) + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.search_model_monitoring_alerts(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_search_model_monitoring_alerts_rest_unset_required_fields(): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.search_model_monitoring_alerts._get_unset_required_fields( + {} + ) + assert set(unset_fields) == (set(()) & set(("modelMonitor",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_search_model_monitoring_alerts_rest_interceptors(null_interceptor): + transport = transports.ModelMonitoringServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ModelMonitoringServiceRestInterceptor(), + ) + client = ModelMonitoringServiceClient(transport=transport) + with 
mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, + "post_search_model_monitoring_alerts", + ) as post, mock.patch.object( + transports.ModelMonitoringServiceRestInterceptor, + "pre_search_model_monitoring_alerts", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = model_monitoring_service.SearchModelMonitoringAlertsRequest.pb( + model_monitoring_service.SearchModelMonitoringAlertsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + model_monitoring_service.SearchModelMonitoringAlertsResponse.to_json( + model_monitoring_service.SearchModelMonitoringAlertsResponse() + ) + ) + + request = model_monitoring_service.SearchModelMonitoringAlertsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = ( + model_monitoring_service.SearchModelMonitoringAlertsResponse() + ) + + client.search_model_monitoring_alerts( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_search_model_monitoring_alerts_rest_bad_request( + transport: str = "rest", + request_type=model_monitoring_service.SearchModelMonitoringAlertsRequest, +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "model_monitor": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and 
fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.search_model_monitoring_alerts(request) + + +def test_search_model_monitoring_alerts_rest_flattened(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = model_monitoring_service.SearchModelMonitoringAlertsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "model_monitor": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + model_monitor="model_monitor_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = model_monitoring_service.SearchModelMonitoringAlertsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.search_model_monitoring_alerts(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{model_monitor=projects/*/locations/*/modelMonitors/*}:searchModelMonitoringAlerts" + % client.transport._host, + args[1], + ) + + +def test_search_model_monitoring_alerts_rest_flattened_error(transport: str = "rest"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.search_model_monitoring_alerts( + model_monitoring_service.SearchModelMonitoringAlertsRequest(), + model_monitor="model_monitor_value", + ) + + +def test_search_model_monitoring_alerts_rest_pager(transport: str = "rest"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[ + model_monitoring_alert.ModelMonitoringAlert(), + model_monitoring_alert.ModelMonitoringAlert(), + model_monitoring_alert.ModelMonitoringAlert(), + ], + next_page_token="abc", + ), + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[], + next_page_token="def", + ), + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[ + model_monitoring_alert.ModelMonitoringAlert(), + ], + next_page_token="ghi", + ), + model_monitoring_service.SearchModelMonitoringAlertsResponse( + model_monitoring_alerts=[ + model_monitoring_alert.ModelMonitoringAlert(), + model_monitoring_alert.ModelMonitoringAlert(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + model_monitoring_service.SearchModelMonitoringAlertsResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "model_monitor": "projects/sample1/locations/sample2/modelMonitors/sample3" + } + + pager = client.search_model_monitoring_alerts(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, model_monitoring_alert.ModelMonitoringAlert) for i in results + ) + + pages = list( + client.search_model_monitoring_alerts(request=sample_request).pages + ) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a 
transport instance. + transport = transports.ModelMonitoringServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ModelMonitoringServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelMonitoringServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.ModelMonitoringServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ModelMonitoringServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ModelMonitoringServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ModelMonitoringServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelMonitoringServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.ModelMonitoringServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ModelMonitoringServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ModelMonitoringServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ModelMonitoringServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelMonitoringServiceGrpcTransport, + transports.ModelMonitoringServiceGrpcAsyncIOTransport, + transports.ModelMonitoringServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = ModelMonitoringServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.ModelMonitoringServiceGrpcTransport, + ) + + +def test_model_monitoring_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ModelMonitoringServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_model_monitoring_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.model_monitoring_service.transports.ModelMonitoringServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.ModelMonitoringServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "create_model_monitor", + "update_model_monitor", + "get_model_monitor", + "list_model_monitors", + "delete_model_monitor", + "create_model_monitoring_job", + "get_model_monitoring_job", + "list_model_monitoring_jobs", + "delete_model_monitoring_job", + "search_model_monitoring_stats", + "search_model_monitoring_alerts", + "set_iam_policy", + "get_iam_policy", + "test_iam_permissions", + "get_location", + "list_locations", + "get_operation", + "wait_operation", + "cancel_operation", + "delete_operation", + "list_operations", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_model_monitoring_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1beta1.services.model_monitoring_service.transports.ModelMonitoringServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ModelMonitoringServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_model_monitoring_service_base_transport_with_adc(): + # Test the default credentials are 
used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.aiplatform_v1beta1.services.model_monitoring_service.transports.ModelMonitoringServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ModelMonitoringServiceTransport() + adc.assert_called_once() + + +def test_model_monitoring_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ModelMonitoringServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelMonitoringServiceGrpcTransport, + transports.ModelMonitoringServiceGrpcAsyncIOTransport, + ], +) +def test_model_monitoring_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelMonitoringServiceGrpcTransport, + transports.ModelMonitoringServiceGrpcAsyncIOTransport, + transports.ModelMonitoringServiceRestTransport, + ], +) +def test_model_monitoring_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.ModelMonitoringServiceGrpcTransport, grpc_helpers), + (transports.ModelMonitoringServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_model_monitoring_service_transport_create_channel( + transport_class, grpc_helpers +): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelMonitoringServiceGrpcTransport, + transports.ModelMonitoringServiceGrpcAsyncIOTransport, + ], +) +def test_model_monitoring_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_model_monitoring_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.ModelMonitoringServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_model_monitoring_service_rest_lro_client(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_model_monitoring_service_host_no_port(transport_name): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_model_monitoring_service_host_with_port(transport_name): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_model_monitoring_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = ModelMonitoringServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = ModelMonitoringServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.create_model_monitor._session + session2 = client2.transport.create_model_monitor._session + assert session1 != session2 + session1 = client1.transport.update_model_monitor._session + session2 = client2.transport.update_model_monitor._session + assert session1 != session2 + session1 = client1.transport.get_model_monitor._session 
+ session2 = client2.transport.get_model_monitor._session + assert session1 != session2 + session1 = client1.transport.list_model_monitors._session + session2 = client2.transport.list_model_monitors._session + assert session1 != session2 + session1 = client1.transport.delete_model_monitor._session + session2 = client2.transport.delete_model_monitor._session + assert session1 != session2 + session1 = client1.transport.create_model_monitoring_job._session + session2 = client2.transport.create_model_monitoring_job._session + assert session1 != session2 + session1 = client1.transport.get_model_monitoring_job._session + session2 = client2.transport.get_model_monitoring_job._session + assert session1 != session2 + session1 = client1.transport.list_model_monitoring_jobs._session + session2 = client2.transport.list_model_monitoring_jobs._session + assert session1 != session2 + session1 = client1.transport.delete_model_monitoring_job._session + session2 = client2.transport.delete_model_monitoring_job._session + assert session1 != session2 + session1 = client1.transport.search_model_monitoring_stats._session + session2 = client2.transport.search_model_monitoring_stats._session + assert session1 != session2 + session1 = client1.transport.search_model_monitoring_alerts._session + session2 = client2.transport.search_model_monitoring_alerts._session + assert session1 != session2 + + +def test_model_monitoring_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.ModelMonitoringServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_model_monitoring_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.ModelMonitoringServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelMonitoringServiceGrpcTransport, + transports.ModelMonitoringServiceGrpcAsyncIOTransport, + ], +) +def test_model_monitoring_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + 
grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelMonitoringServiceGrpcTransport, + transports.ModelMonitoringServiceGrpcAsyncIOTransport, + ], +) +def test_model_monitoring_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_model_monitoring_service_grpc_lro_client(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + transport = 
client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_model_monitoring_service_grpc_lro_async_client(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_batch_prediction_job_path(): + project = "squid" + location = "clam" + batch_prediction_job = "whelk" + expected = "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format( + project=project, + location=location, + batch_prediction_job=batch_prediction_job, + ) + actual = ModelMonitoringServiceClient.batch_prediction_job_path( + project, location, batch_prediction_job + ) + assert expected == actual + + +def test_parse_batch_prediction_job_path(): + expected = { + "project": "octopus", + "location": "oyster", + "batch_prediction_job": "nudibranch", + } + path = ModelMonitoringServiceClient.batch_prediction_job_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelMonitoringServiceClient.parse_batch_prediction_job_path(path) + assert expected == actual + + +def test_dataset_path(): + project = "cuttlefish" + location = "mussel" + dataset = "winkle" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, + location=location, + dataset=dataset, + ) + actual = ModelMonitoringServiceClient.dataset_path(project, location, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "dataset": "abalone", + } + path = ModelMonitoringServiceClient.dataset_path(**expected) + + # Check that the path construction is reversible. + actual = ModelMonitoringServiceClient.parse_dataset_path(path) + assert expected == actual + + +def test_endpoint_path(): + project = "squid" + location = "clam" + endpoint = "whelk" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format( + project=project, + location=location, + endpoint=endpoint, + ) + actual = ModelMonitoringServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "octopus", + "location": "oyster", + "endpoint": "nudibranch", + } + path = ModelMonitoringServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelMonitoringServiceClient.parse_endpoint_path(path) + assert expected == actual + + +def test_model_path(): + project = "cuttlefish" + location = "mussel" + model = "winkle" + expected = "projects/{project}/locations/{location}/models/{model}".format( + project=project, + location=location, + model=model, + ) + actual = ModelMonitoringServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "model": "abalone", + } + path = ModelMonitoringServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = ModelMonitoringServiceClient.parse_model_path(path) + assert expected == actual + + +def test_model_monitor_path(): + project = "squid" + location = "clam" + model_monitor = "whelk" + expected = ( + "projects/{project}/locations/{location}/modelMonitors/{model_monitor}".format( + project=project, + location=location, + model_monitor=model_monitor, + ) + ) + actual = ModelMonitoringServiceClient.model_monitor_path( + project, location, model_monitor + ) + assert expected == actual + + +def test_parse_model_monitor_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model_monitor": "nudibranch", + } + path = ModelMonitoringServiceClient.model_monitor_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelMonitoringServiceClient.parse_model_monitor_path(path) + assert expected == actual + + +def test_model_monitoring_job_path(): + project = "cuttlefish" + location = "mussel" + model_monitor = "winkle" + model_monitoring_job = "nautilus" + expected = "projects/{project}/locations/{location}/modelMonitors/{model_monitor}/modelMonitoringJobs/{model_monitoring_job}".format( + project=project, + location=location, + model_monitor=model_monitor, + model_monitoring_job=model_monitoring_job, + ) + actual = ModelMonitoringServiceClient.model_monitoring_job_path( + project, location, model_monitor, model_monitoring_job + ) + assert expected == actual + + +def test_parse_model_monitoring_job_path(): + expected = { + "project": "scallop", + "location": "abalone", + "model_monitor": "squid", + "model_monitoring_job": "clam", + } + path = ModelMonitoringServiceClient.model_monitoring_job_path(**expected) + + # Check that the path construction is reversible. + actual = ModelMonitoringServiceClient.parse_model_monitoring_job_path(path) + assert expected == actual + + +def test_schedule_path(): + project = "whelk" + location = "octopus" + schedule = "oyster" + expected = "projects/{project}/locations/{location}/schedules/{schedule}".format( + project=project, + location=location, + schedule=schedule, + ) + actual = ModelMonitoringServiceClient.schedule_path(project, location, schedule) + assert expected == actual + + +def test_parse_schedule_path(): + expected = { + "project": "nudibranch", + "location": "cuttlefish", + "schedule": "mussel", + } + path = ModelMonitoringServiceClient.schedule_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelMonitoringServiceClient.parse_schedule_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "winkle" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = ModelMonitoringServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nautilus", + } + path = ModelMonitoringServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ModelMonitoringServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "scallop" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = ModelMonitoringServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "abalone", + } + path = ModelMonitoringServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ModelMonitoringServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "squid" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = ModelMonitoringServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = ModelMonitoringServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelMonitoringServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "whelk" + expected = "projects/{project}".format( + project=project, + ) + actual = ModelMonitoringServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "octopus", + } + path = ModelMonitoringServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ModelMonitoringServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = ModelMonitoringServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = ModelMonitoringServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelMonitoringServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.ModelMonitoringServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.ModelMonitoringServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = ModelMonitoringServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_get_location_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.GetLocationRequest +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_location(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.GetLocationRequest, + dict, + ], +) +def test_get_location_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.Location() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_location(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_list_locations_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.ListLocationsRequest +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({"name": "projects/sample1"}, request) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_locations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.ListLocationsRequest, + dict, + ], +) +def test_list_locations_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.ListLocationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_locations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_get_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) +def test_get_iam_policy_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + +def test_set_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) +def test_set_iam_policy_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + +def test_test_iam_permissions_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], +) +def test_test_iam_permissions_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + +def test_cancel_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.CancelOperationRequest +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.cancel_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.CancelOperationRequest, + dict, + ], +) +def test_cancel_operation_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.cancel_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.DeleteOperationRequest +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.DeleteOperationRequest, + dict, + ], +) +def test_delete_operation_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_get_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.GetOperationRequest +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.GetOperationRequest, + dict, + ], +) +def test_get_operation_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_list_operations_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.ListOperationsRequest +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_operations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.ListOperationsRequest, + dict, + ], +) +def test_list_operations_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.ListOperationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_operations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_wait_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.WaitOperationRequest +): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.wait_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.WaitOperationRequest, + dict, + ], +) +def test_wait_operation_rest(request_type): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.wait_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_delete_operation(transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_operation_async(transport: str = "grpc_asyncio"): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_delete_operation_from_dict(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_operation_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_cancel_operation_from_dict(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_wait_operation(transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = operations_pb2.Operation()
+        response = client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+    client = ModelMonitoringServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+
+def test_wait_operation_field_headers():
+    client = ModelMonitoringServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.WaitOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_wait_operation_from_dict(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_get_operation_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_get_operation_from_dict(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_list_operations_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_operations_from_dict(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +@pytest.mark.asyncio +async def test_list_locations_async(transport: str = "grpc_asyncio"): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_list_locations_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_locations_from_dict(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_get_location_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+    request = locations_pb2.GetLocationRequest()
+    request.name = "locations/abc"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = locations_pb2.Location()
+
+        client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations/abc",
+    ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+    client = ModelMonitoringServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.GetLocationRequest()
+    request.name = "locations/abc"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        await client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations/abc",
+    ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+    client = ModelMonitoringServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = ModelMonitoringServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = ModelMonitoringServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(
+            version=774,
+            etag=b"etag_blob",
+        )
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+
+    assert response.version == 774
+
+    assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+    client = ModelMonitoringServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        # The async surface awaits the stub, so wrap the policy in a fake call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            policy_pb2.Policy(
+                version=774,
+                etag=b"etag_blob",
+            )
+        )
+        response = await client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+
+    assert response.version == 774
+
+    assert response.etag == b"etag_blob"
+
+
+def test_set_iam_policy_field_headers():
+    client = ModelMonitoringServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+    request.resource = "resource/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        call.return_value = policy_pb2.Policy()
+
+        client.set_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_set_iam_policy_from_dict(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +def test_get_iam_policy(transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_test_iam_permissions(transport: str = "grpc"): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = ModelMonitoringServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = ModelMonitoringServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (ModelMonitoringServiceClient, transports.ModelMonitoringServiceGrpcTransport), + ( + ModelMonitoringServiceAsyncClient, + transports.ModelMonitoringServiceGrpcAsyncIOTransport, + ), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py index 072b03eb99..7f0248bcd3 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_schedule_service.py @@ -57,10 +57,20 @@ ) from google.cloud.aiplatform_v1beta1.services.schedule_service import pagers from google.cloud.aiplatform_v1beta1.services.schedule_service import transports +from google.cloud.aiplatform_v1beta1.types import accelerator_type from google.cloud.aiplatform_v1beta1.types import artifact from google.cloud.aiplatform_v1beta1.types import context from google.cloud.aiplatform_v1beta1.types import encryption_spec from 
google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import explanation_metadata +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import job_state +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import model_monitoring_alert +from google.cloud.aiplatform_v1beta1.types import model_monitoring_job +from google.cloud.aiplatform_v1beta1.types import model_monitoring_service +from google.cloud.aiplatform_v1beta1.types import model_monitoring_spec from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import pipeline_failure_policy from google.cloud.aiplatform_v1beta1.types import pipeline_job @@ -82,6 +92,7 @@ from google.protobuf import struct_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore +from google.type import interval_pb2 # type: ignore import google.auth @@ -3564,6 +3575,149 @@ def test_create_schedule_rest(request_type): }, "pipeline_job_id": "pipeline_job_id_value", }, + "create_model_monitoring_job_request": { + "parent": "parent_value", + "model_monitoring_job": { + "name": "name_value", + "display_name": "display_name_value", + "model_monitoring_spec": { + "objective_spec": { + "tabular_objective": { + "feature_drift_spec": { + "features": ["features_value1", "features_value2"], + "categorical_metric_type": "categorical_metric_type_value", + "numeric_metric_type": "numeric_metric_type_value", + "default_categorical_alert_condition": { + "threshold": 0.973 + }, + "default_numeric_alert_condition": {}, + "feature_alert_conditions": {}, + }, + "prediction_output_drift_spec": {}, + "feature_attribution_spec": { + "features": ["features_value1", "features_value2"], + "default_alert_condition": {}, + 
"feature_alert_conditions": {}, + "batch_explanation_dedicated_resources": { + "machine_spec": { + "machine_type": "machine_type_value", + "accelerator_type": 1, + "accelerator_count": 1805, + "tpu_topology": "tpu_topology_value", + }, + "starting_replica_count": 2355, + "max_replica_count": 1805, + }, + }, + }, + "explanation_spec": { + "parameters": { + "sampled_shapley_attribution": {"path_count": 1077}, + "integrated_gradients_attribution": { + "step_count": 1092, + "smooth_grad_config": { + "noise_sigma": 0.11660000000000001, + "feature_noise_sigma": { + "noise_sigma": [ + {"name": "name_value", "sigma": 0.529} + ] + }, + "noisy_sample_count": 1947, + }, + "blur_baseline_config": {"max_blur_sigma": 0.1482}, + }, + "xrai_attribution": { + "step_count": 1092, + "smooth_grad_config": {}, + "blur_baseline_config": {}, + }, + "examples": { + "example_gcs_source": { + "data_format": 1, + "gcs_source": { + "uris": ["uris_value1", "uris_value2"] + }, + }, + "nearest_neighbor_search_config": { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "struct_value": {}, + "list_value": {"values": {}}, + }, + "presets": {"query": 1, "modality": 1}, + "gcs_source": {}, + "neighbor_count": 1494, + }, + "top_k": 541, + "output_indices": {}, + }, + "metadata": { + "inputs": {}, + "outputs": {}, + "feature_attributions_schema_uri": "feature_attributions_schema_uri_value", + "latent_space_source": "latent_space_source_value", + }, + }, + "baseline_dataset": { + "columnized_dataset": { + "vertex_dataset": "vertex_dataset_value", + "gcs_source": { + "gcs_uri": "gcs_uri_value", + "format_": 1, + }, + "bigquery_source": { + "table_uri": "table_uri_value", + "query": "query_value", + }, + "timestamp_field": "timestamp_field_value", + }, + "batch_prediction_output": { + "batch_prediction_job": "batch_prediction_job_value" + }, + "vertex_endpoint_logs": { + "endpoints": ["endpoints_value1", "endpoints_value2"] + }, + 
"time_interval": {"start_time": {}, "end_time": {}}, + "time_offset": { + "offset": "offset_value", + "window": "window_value", + }, + }, + "target_dataset": {}, + }, + "notification_spec": { + "email_config": { + "user_emails": ["user_emails_value1", "user_emails_value2"] + }, + "enable_cloud_logging": True, + "notification_channel_configs": [ + {"notification_channel": "notification_channel_value"} + ], + }, + "output_spec": { + "gcs_base_directory": { + "output_uri_prefix": "output_uri_prefix_value" + } + }, + }, + "create_time": {}, + "update_time": {}, + "state": 1, + "schedule": "schedule_value", + "job_execution_detail": { + "baseline_datasets": [ + {"location": "location_value", "time_range": {}} + ], + "target_datasets": {}, + "objective_status": {}, + "error": {}, + }, + "schedule_time": {}, + }, + "model_monitoring_job_id": "model_monitoring_job_id_value", + }, "name": "name_value", "display_name": "display_name_value", "start_time": {}, @@ -5453,6 +5607,149 @@ def test_update_schedule_rest(request_type): }, "pipeline_job_id": "pipeline_job_id_value", }, + "create_model_monitoring_job_request": { + "parent": "parent_value", + "model_monitoring_job": { + "name": "name_value", + "display_name": "display_name_value", + "model_monitoring_spec": { + "objective_spec": { + "tabular_objective": { + "feature_drift_spec": { + "features": ["features_value1", "features_value2"], + "categorical_metric_type": "categorical_metric_type_value", + "numeric_metric_type": "numeric_metric_type_value", + "default_categorical_alert_condition": { + "threshold": 0.973 + }, + "default_numeric_alert_condition": {}, + "feature_alert_conditions": {}, + }, + "prediction_output_drift_spec": {}, + "feature_attribution_spec": { + "features": ["features_value1", "features_value2"], + "default_alert_condition": {}, + "feature_alert_conditions": {}, + "batch_explanation_dedicated_resources": { + "machine_spec": { + "machine_type": "machine_type_value", + "accelerator_type": 1, + 
"accelerator_count": 1805, + "tpu_topology": "tpu_topology_value", + }, + "starting_replica_count": 2355, + "max_replica_count": 1805, + }, + }, + }, + "explanation_spec": { + "parameters": { + "sampled_shapley_attribution": {"path_count": 1077}, + "integrated_gradients_attribution": { + "step_count": 1092, + "smooth_grad_config": { + "noise_sigma": 0.11660000000000001, + "feature_noise_sigma": { + "noise_sigma": [ + {"name": "name_value", "sigma": 0.529} + ] + }, + "noisy_sample_count": 1947, + }, + "blur_baseline_config": {"max_blur_sigma": 0.1482}, + }, + "xrai_attribution": { + "step_count": 1092, + "smooth_grad_config": {}, + "blur_baseline_config": {}, + }, + "examples": { + "example_gcs_source": { + "data_format": 1, + "gcs_source": { + "uris": ["uris_value1", "uris_value2"] + }, + }, + "nearest_neighbor_search_config": { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "struct_value": {}, + "list_value": {"values": {}}, + }, + "presets": {"query": 1, "modality": 1}, + "gcs_source": {}, + "neighbor_count": 1494, + }, + "top_k": 541, + "output_indices": {}, + }, + "metadata": { + "inputs": {}, + "outputs": {}, + "feature_attributions_schema_uri": "feature_attributions_schema_uri_value", + "latent_space_source": "latent_space_source_value", + }, + }, + "baseline_dataset": { + "columnized_dataset": { + "vertex_dataset": "vertex_dataset_value", + "gcs_source": { + "gcs_uri": "gcs_uri_value", + "format_": 1, + }, + "bigquery_source": { + "table_uri": "table_uri_value", + "query": "query_value", + }, + "timestamp_field": "timestamp_field_value", + }, + "batch_prediction_output": { + "batch_prediction_job": "batch_prediction_job_value" + }, + "vertex_endpoint_logs": { + "endpoints": ["endpoints_value1", "endpoints_value2"] + }, + "time_interval": {"start_time": {}, "end_time": {}}, + "time_offset": { + "offset": "offset_value", + "window": "window_value", + }, + }, + "target_dataset": {}, + }, + 
"notification_spec": { + "email_config": { + "user_emails": ["user_emails_value1", "user_emails_value2"] + }, + "enable_cloud_logging": True, + "notification_channel_configs": [ + {"notification_channel": "notification_channel_value"} + ], + }, + "output_spec": { + "gcs_base_directory": { + "output_uri_prefix": "output_uri_prefix_value" + } + }, + }, + "create_time": {}, + "update_time": {}, + "state": 1, + "schedule": "schedule_value", + "job_execution_detail": { + "baseline_datasets": [ + {"location": "location_value", "time_range": {}} + ], + "target_datasets": {}, + "objective_status": {}, + "error": {}, + }, + "schedule_time": {}, + }, + "model_monitoring_job_id": "model_monitoring_job_id_value", + }, "name": "projects/sample1/locations/sample2/schedules/sample3", "display_name": "display_name_value", "start_time": {}, @@ -6469,11 +6766,39 @@ def test_parse_artifact_path(): assert expected == actual -def test_context_path(): +def test_batch_prediction_job_path(): project = "winkle" location = "nautilus" - metadata_store = "scallop" - context = "abalone" + batch_prediction_job = "scallop" + expected = "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format( + project=project, + location=location, + batch_prediction_job=batch_prediction_job, + ) + actual = ScheduleServiceClient.batch_prediction_job_path( + project, location, batch_prediction_job + ) + assert expected == actual + + +def test_parse_batch_prediction_job_path(): + expected = { + "project": "abalone", + "location": "squid", + "batch_prediction_job": "clam", + } + path = ScheduleServiceClient.batch_prediction_job_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ScheduleServiceClient.parse_batch_prediction_job_path(path) + assert expected == actual + + +def test_context_path(): + project = "whelk" + location = "octopus" + metadata_store = "oyster" + context = "nudibranch" expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format( project=project, location=location, @@ -6488,10 +6813,10 @@ def test_context_path(): def test_parse_context_path(): expected = { - "project": "squid", - "location": "clam", - "metadata_store": "whelk", - "context": "octopus", + "project": "cuttlefish", + "location": "mussel", + "metadata_store": "winkle", + "context": "nautilus", } path = ScheduleServiceClient.context_path(**expected) @@ -6501,9 +6826,9 @@ def test_parse_context_path(): def test_custom_job_path(): - project = "oyster" - location = "nudibranch" - custom_job = "cuttlefish" + project = "scallop" + location = "abalone" + custom_job = "squid" expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format( project=project, location=location, @@ -6515,9 +6840,9 @@ def test_custom_job_path(): def test_parse_custom_job_path(): expected = { - "project": "mussel", - "location": "winkle", - "custom_job": "nautilus", + "project": "clam", + "location": "whelk", + "custom_job": "octopus", } path = ScheduleServiceClient.custom_job_path(**expected) @@ -6526,11 +6851,63 @@ def test_parse_custom_job_path(): assert expected == actual -def test_execution_path(): +def test_dataset_path(): + project = "oyster" + location = "nudibranch" + dataset = "cuttlefish" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, + location=location, + dataset=dataset, + ) + actual = ScheduleServiceClient.dataset_path(project, location, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "mussel", + "location": "winkle", + "dataset": "nautilus", + } + path = 
ScheduleServiceClient.dataset_path(**expected) + + # Check that the path construction is reversible. + actual = ScheduleServiceClient.parse_dataset_path(path) + assert expected == actual + + +def test_endpoint_path(): project = "scallop" location = "abalone" - metadata_store = "squid" - execution = "clam" + endpoint = "squid" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format( + project=project, + location=location, + endpoint=endpoint, + ) + actual = ScheduleServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "clam", + "location": "whelk", + "endpoint": "octopus", + } + path = ScheduleServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. + actual = ScheduleServiceClient.parse_endpoint_path(path) + assert expected == actual + + +def test_execution_path(): + project = "oyster" + location = "nudibranch" + metadata_store = "cuttlefish" + execution = "mussel" expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format( project=project, location=location, @@ -6545,10 +6922,10 @@ def test_execution_path(): def test_parse_execution_path(): expected = { - "project": "whelk", - "location": "octopus", - "metadata_store": "oyster", - "execution": "nudibranch", + "project": "winkle", + "location": "nautilus", + "metadata_store": "scallop", + "execution": "abalone", } path = ScheduleServiceClient.execution_path(**expected) @@ -6557,9 +6934,68 @@ def test_parse_execution_path(): assert expected == actual -def test_network_path(): +def test_model_monitor_path(): + project = "squid" + location = "clam" + model_monitor = "whelk" + expected = ( + "projects/{project}/locations/{location}/modelMonitors/{model_monitor}".format( + project=project, + location=location, + model_monitor=model_monitor, + ) + ) + actual = ScheduleServiceClient.model_monitor_path(project, 
location, model_monitor) + assert expected == actual + + +def test_parse_model_monitor_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model_monitor": "nudibranch", + } + path = ScheduleServiceClient.model_monitor_path(**expected) + + # Check that the path construction is reversible. + actual = ScheduleServiceClient.parse_model_monitor_path(path) + assert expected == actual + + +def test_model_monitoring_job_path(): project = "cuttlefish" - network = "mussel" + location = "mussel" + model_monitor = "winkle" + model_monitoring_job = "nautilus" + expected = "projects/{project}/locations/{location}/modelMonitors/{model_monitor}/modelMonitoringJobs/{model_monitoring_job}".format( + project=project, + location=location, + model_monitor=model_monitor, + model_monitoring_job=model_monitoring_job, + ) + actual = ScheduleServiceClient.model_monitoring_job_path( + project, location, model_monitor, model_monitoring_job + ) + assert expected == actual + + +def test_parse_model_monitoring_job_path(): + expected = { + "project": "scallop", + "location": "abalone", + "model_monitor": "squid", + "model_monitoring_job": "clam", + } + path = ScheduleServiceClient.model_monitoring_job_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ScheduleServiceClient.parse_model_monitoring_job_path(path) + assert expected == actual + + +def test_network_path(): + project = "whelk" + network = "octopus" expected = "projects/{project}/global/networks/{network}".format( project=project, network=network, @@ -6570,8 +7006,8 @@ def test_network_path(): def test_parse_network_path(): expected = { - "project": "winkle", - "network": "nautilus", + "project": "oyster", + "network": "nudibranch", } path = ScheduleServiceClient.network_path(**expected) @@ -6581,9 +7017,9 @@ def test_parse_network_path(): def test_pipeline_job_path(): - project = "scallop" - location = "abalone" - pipeline_job = "squid" + project = "cuttlefish" + location = "mussel" + pipeline_job = "winkle" expected = ( "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format( project=project, @@ -6597,9 +7033,9 @@ def test_pipeline_job_path(): def test_parse_pipeline_job_path(): expected = { - "project": "clam", - "location": "whelk", - "pipeline_job": "octopus", + "project": "nautilus", + "location": "scallop", + "pipeline_job": "abalone", } path = ScheduleServiceClient.pipeline_job_path(**expected) @@ -6609,9 +7045,9 @@ def test_parse_pipeline_job_path(): def test_schedule_path(): - project = "oyster" - location = "nudibranch" - schedule = "cuttlefish" + project = "squid" + location = "clam" + schedule = "whelk" expected = "projects/{project}/locations/{location}/schedules/{schedule}".format( project=project, location=location, @@ -6623,9 +7059,9 @@ def test_schedule_path(): def test_parse_schedule_path(): expected = { - "project": "mussel", - "location": "winkle", - "schedule": "nautilus", + "project": "octopus", + "location": "oyster", + "schedule": "nudibranch", } path = ScheduleServiceClient.schedule_path(**expected) @@ -6635,7 +7071,7 @@ def test_parse_schedule_path(): def test_common_billing_account_path(): - billing_account = "scallop" + billing_account = "cuttlefish" expected = 
"billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -6645,7 +7081,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "abalone", + "billing_account": "mussel", } path = ScheduleServiceClient.common_billing_account_path(**expected) @@ -6655,7 +7091,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "squid" + folder = "winkle" expected = "folders/{folder}".format( folder=folder, ) @@ -6665,7 +7101,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "clam", + "folder": "nautilus", } path = ScheduleServiceClient.common_folder_path(**expected) @@ -6675,7 +7111,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "whelk" + organization = "scallop" expected = "organizations/{organization}".format( organization=organization, ) @@ -6685,7 +7121,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "octopus", + "organization": "abalone", } path = ScheduleServiceClient.common_organization_path(**expected) @@ -6695,7 +7131,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "oyster" + project = "squid" expected = "projects/{project}".format( project=project, ) @@ -6705,7 +7141,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "nudibranch", + "project": "clam", } path = ScheduleServiceClient.common_project_path(**expected) @@ -6715,8 +7151,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "cuttlefish" - location = "mussel" + project = "whelk" + location = "octopus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -6727,8 +7163,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected 
= { - "project": "winkle", - "location": "nautilus", + "project": "oyster", + "location": "nudibranch", } path = ScheduleServiceClient.common_location_path(**expected)